Dataset schema (string lengths are the observed min–max; stringclasses counts distinct values):

| column | type | values |
|---|---|---|
| commit | string | lengths 40–40 |
| old_file | string | lengths 4–118 |
| new_file | string | lengths 4–118 |
| old_contents | string | lengths 0–2.94k |
| new_contents | string | lengths 1–4.43k |
| subject | string | lengths 15–444 |
| message | string | lengths 16–3.45k |
| lang | string | 1 class |
| license | string | 13 classes |
| repos | string | lengths 5–43.2k |
| prompt | string | lengths 17–4.58k |
| response | string | lengths 1–4.43k |
| prompt_tagged | string | lengths 58–4.62k |
| response_tagged | string | lengths 1–4.43k |
| text | string | lengths 132–7.29k |
| text_tagged | string | lengths 173–7.33k |
e31b050e3a856eb21a995747fc8b3a5a7f2d7d73
|
deploy/instance_settings/fffs/instance_settings.py
|
deploy/instance_settings/fffs/instance_settings.py
|
# -*- coding: utf-8 -*-
# Prefix of default document collection of this instance
FST_INSTANCE_PREFIX = "fffs"
# Organization authorized to publish these documents.
FST_ORG_NAME = u"Finansinspektionen"
FST_ORG_NAME_POSSESSIVE = u"Finansinspektionens"
# Contact information for ATOM feed
FST_ORG_CONTACT_NAME = u"Jonas Beckman"
FST_ORG_CONTACT_URL = "http://www.fi.se/"
FST_ORG_CONTACT_EMAIL = "lagrum.fffs@fi.se"
# Description of data source for Atom feed.
# These values will be supplied by Rättsinformationsprojektet.
FST_DATASET_URI = "tag:finansinspektionen.se,2009:rinfo:feed"
FST_DATASET_TITLE = u"Flöde för Finansinspektionens författningssamling"
# Make the secret key below unique, and don't share it with anybody.
|
Add settings for another instance: fffs
|
Add settings for another instance: fffs
|
Python
|
bsd-3-clause
|
rinfo/fst,kamidev/autobuild_fst,rinfo/fst,kamidev/autobuild_fst,rinfo/fst,kamidev/autobuild_fst,kamidev/autobuild_fst,rinfo/fst
|
Add settings for another instance: fffs
|
# -*- coding: utf-8 -*-
# Prefix of default document collection of this instance
FST_INSTANCE_PREFIX = "fffs"
# Organization authorized to publish these documents.
FST_ORG_NAME = u"Finansinspektionen"
FST_ORG_NAME_POSSESSIVE = u"Finansinspektionens"
# Contact information for ATOM feed
FST_ORG_CONTACT_NAME = u"Jonas Beckman"
FST_ORG_CONTACT_URL = "http://www.fi.se/"
FST_ORG_CONTACT_EMAIL = "lagrum.fffs@fi.se"
# Description of data source for Atom feed.
# These values will be supplied by Rättsinformationsprojektet.
FST_DATASET_URI = "tag:finansinspektionen.se,2009:rinfo:feed"
FST_DATASET_TITLE = u"Flöde för Finansinspektionens författningssamling"
# Make the secret key below unique, and don't share it with anybody.
|
<commit_before><commit_msg>Add settings for another instance: fffs<commit_after>
|
# -*- coding: utf-8 -*-
# Prefix of default document collection of this instance
FST_INSTANCE_PREFIX = "fffs"
# Organization authorized to publish these documents.
FST_ORG_NAME = u"Finansinspektionen"
FST_ORG_NAME_POSSESSIVE = u"Finansinspektionens"
# Contact information for ATOM feed
FST_ORG_CONTACT_NAME = u"Jonas Beckman"
FST_ORG_CONTACT_URL = "http://www.fi.se/"
FST_ORG_CONTACT_EMAIL = "lagrum.fffs@fi.se"
# Description of data source for Atom feed.
# These values will be supplied by Rättsinformationsprojektet.
FST_DATASET_URI = "tag:finansinspektionen.se,2009:rinfo:feed"
FST_DATASET_TITLE = u"Flöde för Finansinspektionens författningssamling"
# Make the secret key below unique, and don't share it with anybody.
|
Add settings for another instance: fffs# -*- coding: utf-8 -*-
# Prefix of default document collection of this instance
FST_INSTANCE_PREFIX = "fffs"
# Organization authorized to publish these documents.
FST_ORG_NAME = u"Finansinspektionen"
FST_ORG_NAME_POSSESSIVE = u"Finansinspektionens"
# Contact information for ATOM feed
FST_ORG_CONTACT_NAME = u"Jonas Beckman"
FST_ORG_CONTACT_URL = "http://www.fi.se/"
FST_ORG_CONTACT_EMAIL = "lagrum.fffs@fi.se"
# Description of data source for Atom feed.
# These values will be supplied by Rättsinformationsprojektet.
FST_DATASET_URI = "tag:finansinspektionen.se,2009:rinfo:feed"
FST_DATASET_TITLE = u"Flöde för Finansinspektionens författningssamling"
# Make the secret key below unique, and don't share it with anybody.
|
<commit_before><commit_msg>Add settings for another instance: fffs<commit_after># -*- coding: utf-8 -*-
# Prefix of default document collection of this instance
FST_INSTANCE_PREFIX = "fffs"
# Organization authorized to publish these documents.
FST_ORG_NAME = u"Finansinspektionen"
FST_ORG_NAME_POSSESSIVE = u"Finansinspektionens"
# Contact information for ATOM feed
FST_ORG_CONTACT_NAME = u"Jonas Beckman"
FST_ORG_CONTACT_URL = "http://www.fi.se/"
FST_ORG_CONTACT_EMAIL = "lagrum.fffs@fi.se"
# Description of data source for Atom feed.
# These values will be supplied by Rättsinformationsprojektet.
FST_DATASET_URI = "tag:finansinspektionen.se,2009:rinfo:feed"
FST_DATASET_TITLE = u"Flöde för Finansinspektionens författningssamling"
# Make the secret key below unique, and don't share it with anybody.
|
|
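In the row above, the prompt, response, text and *_tagged columns are derived from the earlier fields: prompt repeats the commit message, response repeats new_contents, text concatenates the two, and the tagged variants wrap them in <commit_before>, <commit_msg> and <commit_after> markers. A minimal sketch of how these fields appear to be assembled; old_contents is empty in every row shown here, so its placement directly after <commit_before> is an assumption.

```python
def build_untagged(row):
    # prompt is the commit message, response the new file contents,
    # and text is simply their concatenation.
    prompt = row["message"]
    response = row["new_contents"]
    return prompt, response, prompt + response


def build_tagged(row):
    # Tagged variant: old contents (empty for newly added files), commit
    # message and new contents, separated by explicit markers.
    prompt_tagged = ("<commit_before>" + row["old_contents"]
                     + "<commit_msg>" + row["message"] + "<commit_after>")
    response_tagged = row["new_contents"]
    return prompt_tagged, response_tagged, prompt_tagged + response_tagged
```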
266894ae726b9764a0bc0469b8cc2147cba26566
|
alignak_backend.py
|
alignak_backend.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015:
# Frederic Mohier, frederic.mohier@gmail.com
#
# This file is part of (WebUI).
#
# (WebUI) is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# (WebUI) is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with (WebUI). If not, see <http://www.gnu.org/licenses/>.
"""
This file is used to run the Alignak backend in a production environment with a WSGI server.
With uWSGI:
uwsgi --wsgi-file alignak_backend.py --callable app --socket 0.0.0.0:8090 --protocol=http --enable-threads
"""
from alignak_backend.app import app
|
Create file for uWSGI launch
|
Create file for uWSGI launch
|
Python
|
agpl-3.0
|
Alignak-monitoring-contrib/alignak-backend,Alignak-monitoring-contrib/alignak-backend,Alignak-monitoring-contrib/alignak-backend,Alignak-monitoring-contrib/alignak-backend
|
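For quick local testing without uWSGI, the same app callable can presumably be served directly. This is only a sketch: it assumes app is a standard Flask/Eve application object exposing run(), which the file above does not state, and the host and port values are illustrative.

```python
# Hypothetical development launcher for the same application object.
from alignak_backend.app import app

if __name__ == '__main__':
    # Flask/Eve applications provide run() for a built-in development server;
    # not suitable for production, where the uWSGI command above applies.
    app.run(host='0.0.0.0', port=8090)
```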
6f50e075d757eb76a1a3da5376430e7bea850e58
|
dbaas/maintenance/scripts/update_volume_path.py
|
dbaas/maintenance/scripts/update_volume_path.py
|
from backup.models import Volume
from workflow.steps.util.volume_provider import VolumeProviderBase
for vol in Volume.objects.all():
vol_inst = vol.host and vol.host.instances.first()
if not vol_inst:
continue
provider = VolumeProviderBase(vol_inst)
vol_path = provider.get_path(vol)
if not vol_path:
continue
for snap in vol.backups.all():
snap.volume_path = vol_path
snap.save()
|
Create Script to populate volume_path field
|
Create Script to populate volume_path field
|
Python
|
bsd-3-clause
|
globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service
|
bf84e7203bfba55f7d9851d6b07e0e93c564b838
|
etl/ecsv/base.py
|
etl/ecsv/base.py
|
import os
import glob
class BaseCSV(object):
def __init__(self, settings):
self.directory = settings.CSV_DATA_DIR
self.data_files = settings.CSV_FILES
def handles(self):
for data_file in self.data_files:
for filename in glob.glob(os.path.join(self.directory, data_file)):
with open(filename, 'r') as fh:
yield fh
|
Create BaseCSV class in support of CSV data extraction
|
Create BaseCSV class in support of CSV data extraction
|
Python
|
mit
|
soccermetrics/marcotti-events
|
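A short usage sketch for the BaseCSV class above; the Settings container and the file pattern are hypothetical stand-ins for whatever the project's real settings module supplies.

```python
import csv

class Settings(object):
    # Hypothetical values; the real project provides these via its settings module.
    CSV_DATA_DIR = '/tmp/csv-data'
    CSV_FILES = ['matches-*.csv']

reader = BaseCSV(Settings)
for fh in reader.handles():
    # Each yielded handle is an open text-mode file, ready for csv.reader, pandas, etc.
    for row in csv.reader(fh):
        print(row)
```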
c5625e0a9e5d0b715d24bfa40e26ed25a8514f54
|
examples/02-plot/plot-cmap.py
|
examples/02-plot/plot-cmap.py
|
"""
Custom Colormaps
~~~~~~~~~~~~~~~~
Use a custom built colormap when plotting scalar values.
"""
################################################################################
# Any colormap built for ``matplotlib`` is fully compatible with ``vtki``.
# Colormaps are typically specified by passing the string name of the
# ``matplotlib`` colormap to the plotting routine via the ``cmap`` argument.
#
# See `this page`_ for a complete list of available colormaps.
# .. _this page: https://matplotlib.org/tutorials/colors/colormaps.html
#
# To get started using a custom colormap, download some data with scalars to
# plot.
import vtki
from vtki import examples
import matplotlib.pyplot as plt
mesh = examples.download_st_helens().warp_by_scalar()
################################################################################
# Build a custom colormap - here we just make a viridis map with 5 discrete
# colors, but you could make this as complex or simple as you desire.
cmap = plt.cm.get_cmap('viridis', 5)
################################################################################
# Simply pass the colormap to the plotting routine!
mesh.plot(cmap=cmap, cpos='xy')
|
Add example page for custom colormaps (incomplete)
|
Add example page for custom colormaps (incomplete)
|
Python
|
mit
|
akaszynski/vtkInterface
|
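Besides sampling an existing colormap, matplotlib can build one from an explicit list of colors. Since the example above states that any matplotlib colormap is compatible with vtki, a ListedColormap should be usable the same way; the color choices below are arbitrary.

```python
from matplotlib.colors import ListedColormap

# A discrete five-color map built from named matplotlib colors.
cmap = ListedColormap(['navy', 'teal', 'gold', 'orange', 'firebrick'])
# Passed to the same plotting call used in the example above.
mesh.plot(cmap=cmap, cpos='xy')
```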
6e27422695d84e4ca9f63f70411a106825508ebf
|
python/chunked_read.py
|
python/chunked_read.py
|
"""Read a binary file in chunks, using 'modern' idiomatic Python."""
import functools
CHUNKSIZE = 42
with open(__file__, 'rb') as handle:
read_chunk = functools.partial(handle.read, CHUNKSIZE)
for i, chunk in enumerate(iter(read_chunk, b'')):
print('{:5d} {!r}'.format(i + 1, chunk))
|
Read a binary file in chunks, using 'modern' idiomatic Python
|
Read a binary file in chunks, using 'modern' idiomatic Python
|
Python
|
unlicense
|
jhermann/waif,jhermann/waif
|
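For comparison, the same loop spelled out without the iter(callable, sentinel) idiom: iter() stops as soon as read_chunk returns the sentinel b'', which is exactly what the explicit test below does.

```python
CHUNKSIZE = 42

with open(__file__, 'rb') as handle:
    count = 0
    while True:
        chunk = handle.read(CHUNKSIZE)
        if chunk == b'':  # end of file: read() returns an empty bytes object
            break
        count += 1
        print('{:5d} {!r}'.format(count, chunk))
```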
a26a8ec3579454ab948519d48a89c5388be4dcae
|
src/problem/migrations/0007_auto_20180711_1651.py
|
src/problem/migrations/0007_auto_20180711_1651.py
|
# Generated by Django 2.0.7 on 2018-07-11 07:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('problem', '0006_auto_20180703_1701'),
]
operations = [
migrations.CreateModel(
name='ProblemQuestion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question', models.TextField()),
('answer', models.TextField(blank=True)),
('datetime', models.DateTimeField(auto_now_add=True)),
('problem_instance', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='problem.ProblemInstance')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': '문제 Q&A',
'verbose_name_plural': '문제 Q&A들',
},
),
migrations.AlterField(
model_name='problemauthlog',
name='datetime',
field=models.DateTimeField(auto_now_add=True),
),
]
|
Add migration files for ProblemQuestion
|
Add migration files for ProblemQuestion
|
Python
|
apache-2.0
|
PLUS-POSTECH/study.plus.or.kr,PLUS-POSTECH/study.plus.or.kr,PLUS-POSTECH/study.plus.or.kr
|
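For reference, a rough sketch of the model this migration appears to create, reconstructed from the CreateModel operation above; the app's module layout and anything beyond the listed fields are assumptions.

```python
from django.conf import settings
from django.db import models


class ProblemQuestion(models.Model):
    question = models.TextField()
    answer = models.TextField(blank=True)
    datetime = models.DateTimeField(auto_now_add=True)
    problem_instance = models.ForeignKey('problem.ProblemInstance',
                                         on_delete=models.PROTECT)
    user = models.ForeignKey(settings.AUTH_USER_MODEL,
                             on_delete=models.PROTECT)

    class Meta:
        verbose_name = '문제 Q&A'
        verbose_name_plural = '문제 Q&A들'
```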
300ec4c6116751c173aa304dd884d7bc85a1020f
|
py/battleships-in-a-board.py
|
py/battleships-in-a-board.py
|
class Solution(object):
def countBattleships(self, board):
"""
:type board: List[List[str]]
:rtype: int
"""
ans = 0
for r, row in enumerate(board):
for c, v in enumerate(row):
if v == 'X':
if r > 0 and board[r - 1][c] == 'X':
pass
elif c > 0 and board[r][c - 1] == 'X':
pass
else:
ans += 1
return ans
|
Add py solution for 419. Battleships in a Board
|
Add py solution for 419. Battleships in a Board
419. Battleships in a Board: https://leetcode.com/problems/battleships-in-a-board/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
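A quick check of the solution above on a small board like the one in the problem statement; the expected count for this layout is 2 (one single-cell ship at the top left and one vertical ship in the last column).

```python
board = [['X', '.', '.', 'X'],
         ['.', '.', '.', 'X'],
         ['.', '.', '.', 'X']]
print(Solution().countBattleships(board))  # -> 2
```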
dff9ab05d0d3f7f2a8f2e69d1d1e743966dfce51
|
vpn-proxy/app/management/commands/reset_tunnels.py
|
vpn-proxy/app/management/commands/reset_tunnels.py
|
from django.core.management.base import BaseCommand
from app.models import Tunnel
class Command(BaseCommand):
help = "Create superuser if missing (Non Interactive)."
def add_arguments(self, parser):
parser.add_argument('tunnel', nargs='*', type=int)
def handle(self, *args, **kwargs):
if kwargs['tunnel']:
tunnels = Tunnel.objects.filter(id__in=kwargs['tunnel'])
else:
tunnels = Tunnel.objects.all()
for tunnel in tunnels:
self.stdout.write("Resetting tunnel %d..." % tunnel.id)
tunnel.reset()
|
Add reset tunnels management command
|
Add reset tunnels management command
|
Python
|
apache-2.0
|
pchristos/vpn-proxy,dimrozakis/vpn-proxy,pchristos/vpn-proxy,dimrozakis/vpn-proxy
|
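Assuming the app is wired into an ordinary Django project, the command above is invoked through manage.py or, programmatically, via call_command; the tunnel ids below are placeholders.

```python
from django.core.management import call_command

# Reset two specific tunnels (ids are illustrative)...
call_command('reset_tunnels', 1, 2)
# ...or, with no arguments, reset every tunnel.
call_command('reset_tunnels')
```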
ca4048d16760a5dfadb8b7cce751ddd666976800
|
src/preprocessing/csvtohdf.py
|
src/preprocessing/csvtohdf.py
|
import pandas as pd
import numpy as np
import os
import glob
def csvtohdf(source, destination):
"""Takes a csv file as input and storest it as a hdf5 file in the
destnation path.
The hdf5 file is stored in table format.
"""
try:
data = pd.read_csv(source,index_col = 1,parse_dates=True)
except(FileNotFoundError, IOError):
print('Wrong file or file path.')
return
if data.empty:
return
data.to_hdf(destination, 'data', mode='w', format='table')
return
def alltohdf(source, destination='../hdf/'):
"""Performs storing of all .csv file present on source directory in a hdf5
data format and save in destination folder."""
if not os.path.exists(destination):
os.makedirs(destination)
os.chdir(source)
for file in glob.glob("*.csv"):
filename = os.path.basename(file)
print('Saving {}...\n'.format(filename))
csvtohdf(file, destination+filename)
return
|
Add module to save .csv files as HDF5 data structure.
|
Add module to save .csv files as HDF5 data structure.
|
Python
|
mit
|
samshara/Stock-Market-Analysis-and-Prediction
|
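Reading a converted file back is symmetric: the module stores each frame under the key 'data' in table format, and alltohdf keeps the original .csv file name for the HDF5 output. The path below is a placeholder.

```python
import pandas as pd

# Load a frame written by csvtohdf/alltohdf; the key matches the 'data' key used above.
df = pd.read_hdf('../hdf/prices.csv', 'data')
print(df.head())
```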
f9343573e2d1b3f3960c99eb90b5df7aad1573ca
|
python/problem4.py
|
python/problem4.py
|
for i in range(10, 100):
for j in range(i, 100):
n = i * j
if str(n) == str(n)[::-1]:
print "{} x {} = {}".format(i, j, n)
for i in range(100, 1000):
for j in range(i, 1000):
n = i * j
if str(n) == str(n)[::-1]:
print "{} x {} = {}".format(i, j, n)
|
Solve problem 4 with python.
|
Solve problem 4 with python.
|
Python
|
mit
|
a-suenami/challenge-project-euler,a-suenami/challenge-project-euler
|
beca03a39c647cbbe0ae7c388614fe655384f774
|
migrations/versions/0109_drop_old_service_flags.py
|
migrations/versions/0109_drop_old_service_flags.py
|
"""empty message
Revision ID: 0109_drop_old_service_flags
Revises: 0108_change_logo_not_nullable
Create Date: 2017-07-12 13:35:45.636618
"""
# revision identifiers, used by Alembic.
revision = '0109_drop_old_service_flags'
down_revision = '0108_change_logo_not_nullable'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.drop_column('services', 'can_send_letters')
op.drop_column('services', 'can_send_international_sms')
op.drop_column('services_history', 'can_send_letters')
op.drop_column('services_history', 'can_send_international_sms')
def downgrade():
op.add_column('services_history', sa.Column('can_send_international_sms', sa.BOOLEAN(), server_default=sa.text('false'), autoincrement=False, nullable=False))
op.add_column('services_history', sa.Column('can_send_letters', sa.BOOLEAN(), server_default=sa.text('false'), autoincrement=False, nullable=False))
op.add_column('services', sa.Column('can_send_international_sms', sa.BOOLEAN(), server_default=sa.text('false'), autoincrement=False, nullable=False))
op.add_column('services', sa.Column('can_send_letters', sa.BOOLEAN(), server_default=sa.text('false'), autoincrement=False, nullable=False))
|
Add migration script to drop service flags
|
Add migration script to drop service flags
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
e75c7b2b0ef8b82dc3c3213e97fe154dafea9afd
|
polling_stations/apps/pollingstations/migrations/0013_customfinders.py
|
polling_stations/apps/pollingstations/migrations/0013_customfinders.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def create_custom_finders(apps, schema_editor):
CustomFinder = apps.get_model("pollingstations", "CustomFinder")
CustomFinder.objects.update_or_create(
area_code='N07000001',
base_url='http://www.eoni.org.uk/'
+ 'Offices/Postcode-Search-Results?postcode=',
can_pass_postcode=True,
message='The Electoral Office of Northern Ireland' +
' has its own polling station finder:',
)
def remove_custom_finders(apps, schema_editor):
CustomFinder = apps.get_model("pollingstations", "CustomFinder")
CustomFinder.objects.filter(
area_code__in=['N07000001', ]).delete()
class Migration(migrations.Migration):
dependencies = [
('pollingstations', '0012_auto_20170211_1443'),
]
operations = [
migrations.RunPython(create_custom_finders, remove_custom_finders),
]
|
Add CustomFinders in data migrations
|
Add CustomFinders in data migrations
I'm not 100% sure this is the best way of doing this in the long term:
Good: CustomFinders are automatically added when migrations are run,
meaning we don't need to figure out a new system for importing them.
They are also checked in to version control, meaning we should have a
good idea of what's active on any deploy.
Bad: After some time we might add and remove lots of custom finders
meaning that we have loads of migrations adding and removing them
over the history of the app. This will slow everything down and
generally not be useful.
I'm going to stick with this for the time being though, as for the moment
we only have 2, and the EONI one is likely to be there for a long time.
|
Python
|
bsd-3-clause
|
DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations
|
9e04e3de5ae971fa9075d6e74fd6300abe684632
|
bashes/great3.py
|
bashes/great3.py
|
import numpy as np
import galsim
def createSource(params):
"""
Returns a GalSim model of the unlensed source specified by params.
"""
# Create the bulge component.
bulge = galsim.Sersic(flux = params['bulge_flux'],
half_light_radius = params['bulge_hlr'],
n = params['bulge_n'])
bulge.applyShear(q = params['bulge_q'],
beta = params['bulge_beta_radians']*galsim.radians)
# Is there a disk component?
if params['disk_flux'] > 0:
disk = galsim.Exponential(flux = params['disk_flux'],
half_light_radius = params['disk_hlr'])
disk.applyShear(q = params['disk_q'],
beta = params['disk_beta_radians']*galsim.radians)
source = galsim.Add(bulge,disk)
else:
source = bulge
return source
def createPSF(params):
"""
Returns a GalSim model of the optical + atmospheric PSF specified by params.
"""
# Create the optical component.
opticalPSF = galsim.OpticalPSF(lam_over_diam = params['opt_psf_lam_over_diam'],
obscuration = params['opt_psf_obscuration'],
nstruts = params['opt_psf_n_struts'],
strut_angle = params['opt_psf_strut_angle'],
pad_factor = params['opt_psf_pad_factor'],
defocus = params['opt_psf_defocus'],
astig1 = params['opt_psf_astig1'],
astig2 = params['opt_psf_astig2'],
coma1 = params['opt_psf_coma1'],
coma2 = params['opt_psf_coma2'],
trefoil1 = params['opt_psf_trefoil1'],
trefoil2 = params['opt_psf_trefoil2'],
spher = params['opt_psf_spher'])
atmosphericPSF = galsim.Kolmogorov(fwhm = params['atmos_psf_fwhm'])
atmosphericPSF.applyShear(e = params['atmos_psf_e'],
beta = params['atmos_psf_beta']*galsim.degrees)
PSF = galsim.Convolve(opticalPSF,atmosphericPSF)
return PSF
|
Add module to create GalSim models from GREAT3 truth catalog entries
|
Add module to create GalSim models from GREAT3 truth catalog entries
|
Python
|
mit
|
deepzot/bashes,deepzot/bashes
|
Add module to create GalSim models from GREAT3 truth catalog entries
|
import numpy as np
import galsim
def createSource(params):
"""
Returns a GalSim model of the unlensed source specified by params.
"""
# Create the bulge component.
bulge = galsim.Sersic(flux = params['bulge_flux'],
half_light_radius = params['bulge_hlr'],
n = params['bulge_n'])
bulge.applyShear(q = params['bulge_q'],
beta = params['bulge_beta_radians']*galsim.radians)
# Is there a disk component?
if params['disk_flux'] > 0:
disk = galsim.Exponential(flux = params['disk_flux'],
half_light_radius = params['disk_hlr'])
disk.applyShear(q = params['disk_q'],
beta = params['disk_beta_radians']*galsim.radians)
source = galsim.Add(bulge,disk)
else:
source = bulge
return source
def createPSF(params):
"""
Returns a GalSim model of the optical + atmospheric PSF specified by params.
"""
# Create the optical component.
opticalPSF = galsim.OpticalPSF(lam_over_diam = params['opt_psf_lam_over_diam'],
obscuration = params['opt_psf_obscuration'],
nstruts = params['opt_psf_n_struts'],
strut_angle = params['opt_psf_strut_angle'],
pad_factor = params['opt_psf_pad_factor'],
defocus = params['opt_psf_defocus'],
astig1 = params['opt_psf_astig1'],
astig2 = params['opt_psf_astig2'],
coma1 = params['opt_psf_coma1'],
coma2 = params['opt_psf_coma2'],
trefoil1 = params['opt_psf_trefoil1'],
trefoil2 = params['opt_psf_trefoil2'],
spher = params['opt_psf_spher'])
atmosphericPSF = galsim.Kolmogorov(fwhm = params['atmos_psf_fwhm'])
atmosphericPSF.applyShear(e = params['atmos_psf_e'],
beta = params['atmos_psf_beta']*galsim.degrees)
PSF = galsim.Convolve(opticalPSF,atmosphericPSF)
return PSF
|
<commit_before><commit_msg>Add module to create GalSim models from GREAT3 truth catalog entries<commit_after>
|
import numpy as np
import galsim
def createSource(params):
"""
Returns a GalSim model of the unlensed source specified by params.
"""
# Create the bulge component.
bulge = galsim.Sersic(flux = params['bulge_flux'],
half_light_radius = params['bulge_hlr'],
n = params['bulge_n'])
bulge.applyShear(q = params['bulge_q'],
beta = params['bulge_beta_radians']*galsim.radians)
# Is there a disk component?
if params['disk_flux'] > 0:
disk = galsim.Exponential(flux = params['disk_flux'],
half_light_radius = params['disk_hlr'])
disk.applyShear(q = params['disk_q'],
beta = params['disk_beta_radians']*galsim.radians)
source = galsim.Add(bulge,disk)
else:
source = bulge
return source
def createPSF(params):
"""
Returns a GalSim model of the optical + atmospheric PSF specified by params.
"""
# Create the optical component.
opticalPSF = galsim.OpticalPSF(lam_over_diam = params['opt_psf_lam_over_diam'],
obscuration = params['opt_psf_obscuration'],
nstruts = params['opt_psf_n_struts'],
strut_angle = params['opt_psf_strut_angle'],
pad_factor = params['opt_psf_pad_factor'],
defocus = params['opt_psf_defocus'],
astig1 = params['opt_psf_astig1'],
astig2 = params['opt_psf_astig2'],
coma1 = params['opt_psf_coma1'],
coma2 = params['opt_psf_coma2'],
trefoil1 = params['opt_psf_trefoil1'],
trefoil2 = params['opt_psf_trefoil2'],
spher = params['opt_psf_spher'])
atmosphericPSF = galsim.Kolmogorov(fwhm = params['atmos_psf_fwhm'])
atmosphericPSF.applyShear(e = params['atmos_psf_e'],
beta = params['atmos_psf_beta']*galsim.degrees)
PSF = galsim.Convolve(opticalPSF,atmosphericPSF)
return PSF
|
Add module to create GalSim models from GREAT3 truth catalog entriesimport numpy as np
import galsim
def createSource(params):
"""
Returns a GalSim model of the unlensed source specified by params.
"""
# Create the bulge component.
bulge = galsim.Sersic(flux = params['bulge_flux'],
half_light_radius = params['bulge_hlr'],
n = params['bulge_n'])
bulge.applyShear(q = params['bulge_q'],
beta = params['bulge_beta_radians']*galsim.radians)
# Is there a disk component?
if params['disk_flux'] > 0:
disk = galsim.Exponential(flux = params['disk_flux'],
half_light_radius = params['disk_hlr'])
disk.applyShear(q = params['disk_q'],
beta = params['disk_beta_radians']*galsim.radians)
source = galsim.Add(bulge,disk)
else:
source = bulge
return source
def createPSF(params):
"""
Returns a GalSim model of the optical + atmospheric PSF specified by params.
"""
# Create the optical component.
opticalPSF = galsim.OpticalPSF(lam_over_diam = params['opt_psf_lam_over_diam'],
obscuration = params['opt_psf_obscuration'],
nstruts = params['opt_psf_n_struts'],
strut_angle = params['opt_psf_strut_angle'],
pad_factor = params['opt_psf_pad_factor'],
defocus = params['opt_psf_defocus'],
astig1 = params['opt_psf_astig1'],
astig2 = params['opt_psf_astig2'],
coma1 = params['opt_psf_coma1'],
coma2 = params['opt_psf_coma2'],
trefoil1 = params['opt_psf_trefoil1'],
trefoil2 = params['opt_psf_trefoil2'],
spher = params['opt_psf_spher'])
atmosphericPSF = galsim.Kolmogorov(fwhm = params['atmos_psf_fwhm'])
atmosphericPSF.applyShear(e = params['atmos_psf_e'],
beta = params['atmos_psf_beta']*galsim.degrees)
PSF = galsim.Convolve(opticalPSF,atmosphericPSF)
return PSF
|
<commit_before><commit_msg>Add module to create GalSim models from GREAT3 truth catalog entries<commit_after>import numpy as np
import galsim
def createSource(params):
"""
Returns a GalSim model of the unlensed source specified by params.
"""
# Create the bulge component.
bulge = galsim.Sersic(flux = params['bulge_flux'],
half_light_radius = params['bulge_hlr'],
n = params['bulge_n'])
bulge.applyShear(q = params['bulge_q'],
beta = params['bulge_beta_radians']*galsim.radians)
# Is there a disk component?
if params['disk_flux'] > 0:
disk = galsim.Exponential(flux = params['disk_flux'],
half_light_radius = params['disk_hlr'])
disk.applyShear(q = params['disk_q'],
beta = params['disk_beta_radians']*galsim.radians)
source = galsim.Add(bulge,disk)
else:
source = bulge
return source
def createPSF(params):
"""
Returns a GalSim model of the optical + atmospheric PSF specified by params.
"""
# Create the optical component.
opticalPSF = galsim.OpticalPSF(lam_over_diam = params['opt_psf_lam_over_diam'],
obscuration = params['opt_psf_obscuration'],
nstruts = params['opt_psf_n_struts'],
strut_angle = params['opt_psf_strut_angle'],
pad_factor = params['opt_psf_pad_factor'],
defocus = params['opt_psf_defocus'],
astig1 = params['opt_psf_astig1'],
astig2 = params['opt_psf_astig2'],
coma1 = params['opt_psf_coma1'],
coma2 = params['opt_psf_coma2'],
trefoil1 = params['opt_psf_trefoil1'],
trefoil2 = params['opt_psf_trefoil2'],
spher = params['opt_psf_spher'])
atmosphericPSF = galsim.Kolmogorov(fwhm = params['atmos_psf_fwhm'])
atmosphericPSF.applyShear(e = params['atmos_psf_e'],
beta = params['atmos_psf_beta']*galsim.degrees)
PSF = galsim.Convolve(opticalPSF,atmosphericPSF)
return PSF
|
|
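For context on how the factory functions in this record are meant to be used, the sketch below builds a galaxy from a truth-catalog-style dict, applies a lensing shear, convolves with a PSF, and renders a postage stamp. It is illustrative only: the parameter values are invented, a plain Kolmogorov profile stands in for createPSF(params), the import path assumes the module is importable as bashes.great3, and the calls assume a GalSim 1.x release in which the mutating applyShear used above coexists with drawImage.

import galsim

from bashes.great3 import createSource

# Hypothetical catalog entry; field names follow createSource above,
# values are made up for illustration.
params = {
    'bulge_flux': 10.0, 'bulge_hlr': 0.6, 'bulge_n': 3.5,
    'bulge_q': 0.7, 'bulge_beta_radians': 0.3,
    'disk_flux': 20.0, 'disk_hlr': 1.2, 'disk_q': 0.5,
    'disk_beta_radians': 0.3,
}

source = createSource(params)
source.applyShear(g1=0.03, g2=-0.01)     # lensing shear to be inferred later
psf = galsim.Kolmogorov(fwhm=0.8)        # stand-in for createPSF(params)
observed = galsim.Convolve(source, psf)  # what the telescope would record
stamp = observed.drawImage(scale=0.2)    # postage stamp with 0.2 arcsec pixels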
070577b8a7d94176df1b789d621ad1e258ee968e
|
senlin/tests/tempest/api/test_build_info.py
|
senlin/tests/tempest/api/test_build_info.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
class TestBuildInfo(base.BaseSenlinTest):
@decorators.idempotent_id('bf7a8bdf-d896-49ff-a7a8-7c8fdbfc3667')
def test_get_build_info(self):
uri = '{0}/build-info'.format(self.client.version)
resp, info = self.client.get(uri)
# Verify resp of get build-info API
self.assertEqual(200, int(resp['status']))
self.assertIsNotNone(info)
for key in ['api', 'engine']:
self.assertIn(key, info)
|
Add tempest tests for build-info API
|
Add tempest tests for build-info API
Add tempest tests for build-info API
Change-Id: I74bfe7f74678e1c35404afad11d111a7b47f75fe
|
Python
|
apache-2.0
|
openstack/senlin,stackforge/senlin,stackforge/senlin,openstack/senlin,openstack/senlin
|
Add tempest tests for build-info API
Add tempest tests for build-info API
Change-Id: I74bfe7f74678e1c35404afad11d111a7b47f75fe
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
class TestBuildInfo(base.BaseSenlinTest):
@decorators.idempotent_id('bf7a8bdf-d896-49ff-a7a8-7c8fdbfc3667')
def test_get_build_info(self):
uri = '{0}/build-info'.format(self.client.version)
resp, info = self.client.get(uri)
# Verify resp of get build-info API
self.assertEqual(200, int(resp['status']))
self.assertIsNotNone(info)
for key in ['api', 'engine']:
self.assertIn(key, info)
|
<commit_before><commit_msg>Add tempest tests for build-info API
Add tempest tests for build-info API
Change-Id: I74bfe7f74678e1c35404afad11d111a7b47f75fe<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
class TestBuildInfo(base.BaseSenlinTest):
@decorators.idempotent_id('bf7a8bdf-d896-49ff-a7a8-7c8fdbfc3667')
def test_get_build_info(self):
uri = '{0}/build-info'.format(self.client.version)
resp, info = self.client.get(uri)
# Verify resp of get build-info API
self.assertEqual(200, int(resp['status']))
self.assertIsNotNone(info)
for key in ['api', 'engine']:
self.assertIn(key, info)
|
Add tempest tests for build-info API
Add tempest tests for build-info API
Change-Id: I74bfe7f74678e1c35404afad11d111a7b47f75fe# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
class TestBuildInfo(base.BaseSenlinTest):
@decorators.idempotent_id('bf7a8bdf-d896-49ff-a7a8-7c8fdbfc3667')
def test_get_build_info(self):
uri = '{0}/build-info'.format(self.client.version)
resp, info = self.client.get(uri)
# Verify resp of get build-info API
self.assertEqual(200, int(resp['status']))
self.assertIsNotNone(info)
for key in ['api', 'engine']:
self.assertIn(key, info)
|
<commit_before><commit_msg>Add tempest tests for build-info API
Add tempest tests for build-info API
Change-Id: I74bfe7f74678e1c35404afad11d111a7b47f75fe<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
class TestBuildInfo(base.BaseSenlinTest):
@decorators.idempotent_id('bf7a8bdf-d896-49ff-a7a8-7c8fdbfc3667')
def test_get_build_info(self):
uri = '{0}/build-info'.format(self.client.version)
resp, info = self.client.get(uri)
# Verify resp of get build-info API
self.assertEqual(200, int(resp['status']))
self.assertIsNotNone(info)
for key in ['api', 'engine']:
self.assertIn(key, info)
|
|
e443527d28d24219c81dc1a2ec1c649de7bb16c7
|
examples/lstm_stateful_seq.py
|
examples/lstm_stateful_seq.py
|
'''Example script to predict sequence using stateful rnns.
At least 10 epochs are required before the generated text
starts sounding coherent.
'''
import numpy as np
import matplotlib.pyplot as mpl
from keras.models import Sequential
from keras.layers.core import Dense
from keras.layers.recurrent import LSTM
# since we are using stateful rnn tsteps can be set to 1
tsteps = 1
batch_size = 25
epochs = 25
# number of elements ahead that are used to make the prediction
lahead = 1
def gen_cosine_amp(amp=100, period=25, x0=0, xn=50000, step=1, k=0.0001):
"""
Generates an absolute cosine time series with the amplitude exponentially
decreasing
Keyword arguments:
amp -- amplitude of the cosine function
period -- period of the cosine function
x0 -- initial x of the time series
xn -- final x of the time series
step -- step of the time series discretization
k -- exponential rate
"""
cos = np.zeros(((xn - x0) * step, 1, 1))
for i in range(len(cos)):
idx = x0 + i * step
cos[i, 0, 0] = amp * np.cos(idx / (2 * np.pi * period))
cos[i, 0, 0] = cos[i, 0, 0] * np.exp(-k * idx)
return cos
print('Creating Data')
cos = gen_cosine_amp()
print('Input shape:')
print(cos.shape)
print('Calculating expected predicted_out')
expected_out = np.zeros((len(cos), 1))
for i in range(len(cos) - lahead):
expected_out[i, 0] = np.mean(cos[i + 1:i + lahead + 1])
print('Output shape')
print(expected_out.shape)
print('Creating Model')
model = Sequential()
model.add(
LSTM(
50,
batch_input_shape=(
batch_size,
tsteps,
1),
return_sequences=True,
stateful=True))
model.add(
LSTM(
50,
batch_input_shape=(
batch_size,
tsteps,
1),
return_sequences=False,
stateful=True))
model.add(Dense(1))
model.compile(loss='rmse', optimizer='rmsprop')
print('Training')
for i in range(epochs):
model.fit(
cos,
expected_out,
batch_size=batch_size,
verbose=1,
nb_epoch=1)
model.reset_states()
print('Predicting')
predicted_out = model.predict(cos, batch_size=batch_size)
print('Ploting Results')
mpl.subplot(2, 1, 1)
mpl.plot(expected_out)
mpl.title('Expected')
mpl.subplot(2, 1, 2)
mpl.plot(predicted_out)
mpl.title('Predicted')
mpl.show()
|
Add example of stateful LSTM sequence prediction
|
Add example of stateful LSTM sequence prediction
|
Python
|
mit
|
relh/keras,keras-team/keras,daviddiazvico/keras,nebw/keras,dolaameng/keras,kuza55/keras,keras-team/keras,DeepGnosis/keras,kemaswill/keras
|
Add example of stateful LSTM sequence prediction
|
'''Example script to predict sequence using stateful rnns.
At least 10 epochs are required before the generated text
starts sounding coherent.
'''
import numpy as np
import matplotlib.pyplot as mpl
from keras.models import Sequential
from keras.layers.core import Dense
from keras.layers.recurrent import LSTM
# since we are using stateful rnn tsteps can be set to 1
tsteps = 1
batch_size = 25
epochs = 25
# number of elements ahead that are used to make the prediction
lahead = 1
def gen_cosine_amp(amp=100, period=25, x0=0, xn=50000, step=1, k=0.0001):
"""
Generates an absolute cosine time series with the amplitude exponentially
decreasing
Keyword arguments:
amp -- amplitude of the cosine function
period -- period of the cosine function
x0 -- initial x of the time series
xn -- final x of the time series
step -- step of the time series discretization
k -- exponential rate
"""
cos = np.zeros(((xn - x0) * step, 1, 1))
for i in range(len(cos)):
idx = x0 + i * step
cos[i, 0, 0] = amp * np.cos(idx / (2 * np.pi * period))
cos[i, 0, 0] = cos[i, 0, 0] * np.exp(-k * idx)
return cos
print('Creating Data')
cos = gen_cosine_amp()
print('Input shape:')
print(cos.shape)
print('Calculating expected predicted_out')
expected_out = np.zeros((len(cos), 1))
for i in range(len(cos) - lahead):
expected_out[i, 0] = np.mean(cos[i + 1:i + lahead + 1])
print('Output shape')
print(expected_out.shape)
print('Creating Model')
model = Sequential()
model.add(
LSTM(
50,
batch_input_shape=(
batch_size,
tsteps,
1),
return_sequences=True,
stateful=True))
model.add(
LSTM(
50,
batch_input_shape=(
batch_size,
tsteps,
1),
return_sequences=False,
stateful=True))
model.add(Dense(1))
model.compile(loss='rmse', optimizer='rmsprop')
print('Training')
for i in range(epochs):
model.fit(
cos,
expected_out,
batch_size=batch_size,
verbose=1,
nb_epoch=1)
model.reset_states()
print('Predicting')
predicted_out = model.predict(cos, batch_size=batch_size)
print('Ploting Results')
mpl.subplot(2, 1, 1)
mpl.plot(expected_out)
mpl.title('Expected')
mpl.subplot(2, 1, 2)
mpl.plot(predicted_out)
mpl.title('Predicted')
mpl.show()
|
<commit_before><commit_msg>Add example of stateful LSTM sequence prediction<commit_after>
|
'''Example script to predict sequence using stateful rnns.
At least 10 epochs are required before the generated text
starts sounding coherent.
'''
import numpy as np
import matplotlib.pyplot as mpl
from keras.models import Sequential
from keras.layers.core import Dense
from keras.layers.recurrent import LSTM
# since we are using stateful rnn tsteps can be set to 1
tsteps = 1
batch_size = 25
epochs = 25
# number of elements ahead that are used to make the prediction
lahead = 1
def gen_cosine_amp(amp=100, period=25, x0=0, xn=50000, step=1, k=0.0001):
"""
Generates an absolute cosine time series with the amplitude exponentially
decreasing
Keyword arguments:
amp -- amplitude of the cosine function
period -- period of the cosine function
x0 -- initial x of the time series
xn -- final x of the time series
step -- step of the time series discretization
k -- exponential rate
"""
cos = np.zeros(((xn - x0) * step, 1, 1))
for i in range(len(cos)):
idx = x0 + i * step
cos[i, 0, 0] = amp * np.cos(idx / (2 * np.pi * period))
cos[i, 0, 0] = cos[i, 0, 0] * np.exp(-k * idx)
return cos
print('Creating Data')
cos = gen_cosine_amp()
print('Input shape:')
print(cos.shape)
print('Calculating expected predicted_out')
expected_out = np.zeros((len(cos), 1))
for i in range(len(cos) - lahead):
expected_out[i, 0] = np.mean(cos[i + 1:i + lahead + 1])
print('Output shape')
print(expected_out.shape)
print('Creating Model')
model = Sequential()
model.add(
LSTM(
50,
batch_input_shape=(
batch_size,
tsteps,
1),
return_sequences=True,
stateful=True))
model.add(
LSTM(
50,
batch_input_shape=(
batch_size,
tsteps,
1),
return_sequences=False,
stateful=True))
model.add(Dense(1))
model.compile(loss='rmse', optimizer='rmsprop')
print('Training')
for i in range(epochs):
model.fit(
cos,
expected_out,
batch_size=batch_size,
verbose=1,
nb_epoch=1)
model.reset_states()
print('Predicting')
predicted_out = model.predict(cos, batch_size=batch_size)
print('Ploting Results')
mpl.subplot(2, 1, 1)
mpl.plot(expected_out)
mpl.title('Expected')
mpl.subplot(2, 1, 2)
mpl.plot(predicted_out)
mpl.title('Predicted')
mpl.show()
|
Add example of stateful LSTM sequence prediction'''Example script to predict sequence using stateful rnns.
At least 10 epochs are required before the generated text
starts sounding coherent.
'''
import numpy as np
import matplotlib.pyplot as mpl
from keras.models import Sequential
from keras.layers.core import Dense
from keras.layers.recurrent import LSTM
# since we are using stateful rnn tsteps can be set to 1
tsteps = 1
batch_size = 25
epochs = 25
# number of elements ahead that are used to make the prediction
lahead = 1
def gen_cosine_amp(amp=100, period=25, x0=0, xn=50000, step=1, k=0.0001):
"""
Generates an absolute cosine time series with the amplitude exponentially
decreasing
Keyword arguments:
amp -- amplitude of the cosine function
period -- period of the cosine function
x0 -- initial x of the time series
xn -- final x of the time series
step -- step of the time series discretization
k -- exponential rate
"""
cos = np.zeros(((xn - x0) * step, 1, 1))
for i in range(len(cos)):
idx = x0 + i * step
cos[i, 0, 0] = amp * np.cos(idx / (2 * np.pi * period))
cos[i, 0, 0] = cos[i, 0, 0] * np.exp(-k * idx)
return cos
print('Creating Data')
cos = gen_cosine_amp()
print('Input shape:')
print(cos.shape)
print('Calculating expected predicted_out')
expected_out = np.zeros((len(cos), 1))
for i in range(len(cos) - lahead):
expected_out[i, 0] = np.mean(cos[i + 1:i + lahead + 1])
print('Output shape')
print(expected_out.shape)
print('Creating Model')
model = Sequential()
model.add(
LSTM(
50,
batch_input_shape=(
batch_size,
tsteps,
1),
return_sequences=True,
stateful=True))
model.add(
LSTM(
50,
batch_input_shape=(
batch_size,
tsteps,
1),
return_sequences=False,
stateful=True))
model.add(Dense(1))
model.compile(loss='rmse', optimizer='rmsprop')
print('Training')
for i in range(epochs):
model.fit(
cos,
expected_out,
batch_size=batch_size,
verbose=1,
nb_epoch=1)
model.reset_states()
print('Predicting')
predicted_out = model.predict(cos, batch_size=batch_size)
print('Ploting Results')
mpl.subplot(2, 1, 1)
mpl.plot(expected_out)
mpl.title('Expected')
mpl.subplot(2, 1, 2)
mpl.plot(predicted_out)
mpl.title('Predicted')
mpl.show()
|
<commit_before><commit_msg>Add example of stateful LSTM sequence prediction<commit_after>'''Example script to predict sequence using stateful rnns.
At least 10 epochs are required before the generated text
starts sounding coherent.
'''
import numpy as np
import matplotlib.pyplot as mpl
from keras.models import Sequential
from keras.layers.core import Dense
from keras.layers.recurrent import LSTM
# since we are using stateful rnn tsteps can be set to 1
tsteps = 1
batch_size = 25
epochs = 25
# number of elements ahead that are used to make the prediction
lahead = 1
def gen_cosine_amp(amp=100, period=25, x0=0, xn=50000, step=1, k=0.0001):
"""
Generates an absolute cosine time series with the amplitude exponentially
decreasing
Keyword arguments:
amp -- amplitude of the cosine function
period -- period of the cosine function
x0 -- initial x of the time series
xn -- final x of the time series
step -- step of the time series discretization
k -- exponential rate
"""
cos = np.zeros(((xn - x0) * step, 1, 1))
for i in range(len(cos)):
idx = x0 + i * step
cos[i, 0, 0] = amp * np.cos(idx / (2 * np.pi * period))
cos[i, 0, 0] = cos[i, 0, 0] * np.exp(-k * idx)
return cos
print('Creating Data')
cos = gen_cosine_amp()
print('Input shape:')
print(cos.shape)
print('Calculating expected predicted_out')
expected_out = np.zeros((len(cos), 1))
for i in range(len(cos) - lahead):
expected_out[i, 0] = np.mean(cos[i + 1:i + lahead + 1])
print('Output shape')
print(expected_out.shape)
print('Creating Model')
model = Sequential()
model.add(
LSTM(
50,
batch_input_shape=(
batch_size,
tsteps,
1),
return_sequences=True,
stateful=True))
model.add(
LSTM(
50,
batch_input_shape=(
batch_size,
tsteps,
1),
return_sequences=False,
stateful=True))
model.add(Dense(1))
model.compile(loss='rmse', optimizer='rmsprop')
print('Training')
for i in range(epochs):
model.fit(
cos,
expected_out,
batch_size=batch_size,
verbose=1,
nb_epoch=1)
model.reset_states()
print('Predicting')
predicted_out = model.predict(cos, batch_size=batch_size)
print('Ploting Results')
mpl.subplot(2, 1, 1)
mpl.plot(expected_out)
mpl.title('Expected')
mpl.subplot(2, 1, 2)
mpl.plot(predicted_out)
mpl.title('Predicted')
mpl.show()
|
|
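The essential contract in the stateful example above is easy to miss among the model details: with stateful=True the LSTM carries its cell state from one batch to the next, so the sample count must be a multiple of batch_size, shuffling has to stay off, and the state must be reset by hand between passes over the series. A stripped-down sketch of just that loop follows, using the same generation of Keras API as the record (nb_epoch, an explicit epoch loop); the layer sizes and random data are placeholders.

import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense
from keras.layers.recurrent import LSTM

batch_size = 25
x = np.random.rand(batch_size * 40, 1, 1)   # (samples, tsteps=1, features=1)
y = np.random.rand(batch_size * 40, 1)

model = Sequential()
model.add(LSTM(10, batch_input_shape=(batch_size, 1, 1), stateful=True))
model.add(Dense(1))
model.compile(loss='mse', optimizer='rmsprop')

for epoch in range(5):
    # shuffle=False keeps batches in temporal order so the carried state is meaningful
    model.fit(x, y, batch_size=batch_size, nb_epoch=1, shuffle=False, verbose=0)
    model.reset_states()    # forget the state before re-reading the series

model.reset_states()
predicted = model.predict(x, batch_size=batch_size)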
83e051d35d41830cbf2d0a8104ec173e12688d69
|
saleor/cart/migrations/fix_empty_data_in_lines.py
|
saleor/cart/migrations/fix_empty_data_in_lines.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-06 13:02
from __future__ import unicode_literals
from django.db import migrations
def convert_lines_data(apps, schema_editor):
CartLine = apps.get_model('cart', 'CartLine')
# Iterate over all cart lines, due to wrong JSONField None handling
for line in CartLine.objects.all():
if line.data is None:
line.data = {}
line.save(update_fields=['data'])
class Migration(migrations.Migration):
dependencies = [
('cart', '0002_auto_20161014_1221'),
]
operations = [
migrations.RunPython(convert_lines_data)
]
|
Add cart lines data migration
|
Add cart lines data migration
|
Python
|
bsd-3-clause
|
mociepka/saleor,itbabu/saleor,HyperManTT/ECommerceSaleor,car3oon/saleor,KenMutemi/saleor,maferelo/saleor,KenMutemi/saleor,tfroehlich82/saleor,KenMutemi/saleor,car3oon/saleor,itbabu/saleor,mociepka/saleor,UITools/saleor,mociepka/saleor,jreigel/saleor,maferelo/saleor,car3oon/saleor,HyperManTT/ECommerceSaleor,UITools/saleor,UITools/saleor,HyperManTT/ECommerceSaleor,jreigel/saleor,tfroehlich82/saleor,tfroehlich82/saleor,UITools/saleor,jreigel/saleor,maferelo/saleor,UITools/saleor,itbabu/saleor
|
Add cart lines data migration
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-06 13:02
from __future__ import unicode_literals
from django.db import migrations
def convert_lines_data(apps, schema_editor):
CartLine = apps.get_model('cart', 'CartLine')
# Iterate over all cart lines, due to wrong JSONField None handling
for line in CartLine.objects.all():
if line.data is None:
line.data = {}
line.save(update_fields=['data'])
class Migration(migrations.Migration):
dependencies = [
('cart', '0002_auto_20161014_1221'),
]
operations = [
migrations.RunPython(convert_lines_data)
]
|
<commit_before><commit_msg>Add cart lines data migration<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-06 13:02
from __future__ import unicode_literals
from django.db import migrations
def convert_lines_data(apps, schema_editor):
CartLine = apps.get_model('cart', 'CartLine')
# Iterate over all cart lines, due to wrong JSONField None handling
for line in CartLine.objects.all():
if line.data is None:
line.data = {}
line.save(update_fields=['data'])
class Migration(migrations.Migration):
dependencies = [
('cart', '0002_auto_20161014_1221'),
]
operations = [
migrations.RunPython(convert_lines_data)
]
|
Add cart lines data migration# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-06 13:02
from __future__ import unicode_literals
from django.db import migrations
def convert_lines_data(apps, schema_editor):
CartLine = apps.get_model('cart', 'CartLine')
# Iterate over all cart lines, due to wrong JSONField None handling
for line in CartLine.objects.all():
if line.data is None:
line.data = {}
line.save(update_fields=['data'])
class Migration(migrations.Migration):
dependencies = [
('cart', '0002_auto_20161014_1221'),
]
operations = [
migrations.RunPython(convert_lines_data)
]
|
<commit_before><commit_msg>Add cart lines data migration<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-06 13:02
from __future__ import unicode_literals
from django.db import migrations
def convert_lines_data(apps, schema_editor):
CartLine = apps.get_model('cart', 'CartLine')
# Iterate over all cart lines, due to wrong JSONField None handling
for line in CartLine.objects.all():
if line.data is None:
line.data = {}
line.save(update_fields=['data'])
class Migration(migrations.Migration):
dependencies = [
('cart', '0002_auto_20161014_1221'),
]
operations = [
migrations.RunPython(convert_lines_data)
]
|
|
c17c78ba7dd1b90a0f8b51e8edb40b974ce0b801
|
astrobin/management/commands/reset_lite_counters.py
|
astrobin/management/commands/reset_lite_counters.py
|
from django.core.management.base import BaseCommand
from paypal.standard.ipn import models
from subscription.models import UserSubscription
class Command(BaseCommand):
def handle(self, *args, **options):
for us in UserSubscription.objects.filter(subscription__name = 'AstroBin Lite'):
profile = us.user.userprofile
print "Processing user: %s" % profile
profile.premium_counter = 0
profile.save()
|
Add management command to reset Lite counters
|
Add management command to reset Lite counters
|
Python
|
agpl-3.0
|
astrobin/astrobin,astrobin/astrobin,astrobin/astrobin,astrobin/astrobin
|
Add management command to reset Lite counters
|
from django.core.management.base import BaseCommand
from paypal.standard.ipn import models
from subscription.models import UserSubscription
class Command(BaseCommand):
def handle(self, *args, **options):
for us in UserSubscription.objects.filter(subscription__name = 'AstroBin Lite'):
profile = us.user.userprofile
print "Processing user: %s" % profile
profile.premium_counter = 0
profile.save()
|
<commit_before><commit_msg>Add management command to reset Lite counters<commit_after>
|
from django.core.management.base import BaseCommand
from paypal.standard.ipn import models
from subscription.models import UserSubscription
class Command(BaseCommand):
def handle(self, *args, **options):
for us in UserSubscription.objects.filter(subscription__name = 'AstroBin Lite'):
profile = us.user.userprofile
print "Processing user: %s" % profile
profile.premium_counter = 0
profile.save()
|
Add management command to reset Lite countersfrom django.core.management.base import BaseCommand
from paypal.standard.ipn import models
from subscription.models import UserSubscription
class Command(BaseCommand):
def handle(self, *args, **options):
for us in UserSubscription.objects.filter(subscription__name = 'AstroBin Lite'):
profile = us.user.userprofile
print "Processing user: %s" % profile
profile.premium_counter = 0
profile.save()
|
<commit_before><commit_msg>Add management command to reset Lite counters<commit_after>from django.core.management.base import BaseCommand
from paypal.standard.ipn import models
from subscription.models import UserSubscription
class Command(BaseCommand):
def handle(self, *args, **options):
for us in UserSubscription.objects.filter(subscription__name = 'AstroBin Lite'):
profile = us.user.userprofile
print "Processing user: %s" % profile
profile.premium_counter = 0
profile.save()
|
|
963f6ccc97b71473ffeecec47ddc5093eff117f2
|
process_my_articles.py
|
process_my_articles.py
|
from pocket import Pocket, PocketException
import simplejson
j = simplejson.loads("config.json")
p = Pocket(
consumer_key=j['consumer_key'] #'67605-7d07d07daa10f7fb8dbe2b50',
access_token=j['access_token'] #'15cc0e47-3178-44aa-99dd-9d27a7'
)
# Fetch a list of articles
try:
print(p.retrieve(offset=0, count=10))
except PocketException as e:
print(e.message)
# Add an article
p.add('https://pymotw.com/3/asyncio/')
# Start a bulk operation and commit
p.archive(1186408060).favorite(1188103217).tags_add(
1168820736, 'Python'
).tags_add(
1168820736, 'Web Development'
).commit()
|
Add the python script that processes the information
|
Add the python script that processes the information
|
Python
|
apache-2.0
|
fullbright/gary-reporter,fullbright/gary-reporter
|
Add the python script that processes the information
|
from pocket import Pocket, PocketException
import simplejson
j = simplejson.loads("config.json")
p = Pocket(
consumer_key=j['consumer_key'] #'67605-7d07d07daa10f7fb8dbe2b50',
access_token=j['access_token'] #'15cc0e47-3178-44aa-99dd-9d27a7'
)
# Fetch a list of articles
try:
print(p.retrieve(offset=0, count=10))
except PocketException as e:
print(e.message)
# Add an article
p.add('https://pymotw.com/3/asyncio/')
# Start a bulk operation and commit
p.archive(1186408060).favorite(1188103217).tags_add(
1168820736, 'Python'
).tags_add(
1168820736, 'Web Development'
).commit()
|
<commit_before><commit_msg>Add the python script that processes the information<commit_after>
|
from pocket import Pocket, PocketException
import simplejson
j = simplejson.loads("config.json")
p = Pocket(
consumer_key=j['consumer_key'] #'67605-7d07d07daa10f7fb8dbe2b50',
access_token=j['access_token'] #'15cc0e47-3178-44aa-99dd-9d27a7'
)
# Fetch a list of articles
try:
print(p.retrieve(offset=0, count=10))
except PocketException as e:
print(e.message)
# Add an article
p.add('https://pymotw.com/3/asyncio/')
# Start a bulk operation and commit
p.archive(1186408060).favorite(1188103217).tags_add(
1168820736, 'Python'
).tags_add(
1168820736, 'Web Development'
).commit()
|
Add the python script that processes the informationfrom pocket import Pocket, PocketException
import simplejson
j = simplejson.loads("config.json")
p = Pocket(
consumer_key=j['consumer_key'] #'67605-7d07d07daa10f7fb8dbe2b50',
access_token=j['access_token'] #'15cc0e47-3178-44aa-99dd-9d27a7'
)
# Fetch a list of articles
try:
print(p.retrieve(offset=0, count=10))
except PocketException as e:
print(e.message)
# Add an article
p.add('https://pymotw.com/3/asyncio/')
# Start a bulk operation and commit
p.archive(1186408060).favorite(1188103217).tags_add(
1168820736, 'Python'
).tags_add(
1168820736, 'Web Development'
).commit()
|
<commit_before><commit_msg>Add the python script that processes the information<commit_after>from pocket import Pocket, PocketException
import simplejson
j = simplejson.loads("config.json")
p = Pocket(
consumer_key=j['consumer_key'] #'67605-7d07d07daa10f7fb8dbe2b50',
access_token=j['access_token'] #'15cc0e47-3178-44aa-99dd-9d27a7'
)
# Fetch a list of articles
try:
print(p.retrieve(offset=0, count=10))
except PocketException as e:
print(e.message)
# Add an article
p.add('https://pymotw.com/3/asyncio/')
# Start a bulk operation and commit
p.archive(1186408060).favorite(1188103217).tags_add(
1168820736, 'Python'
).tags_add(
1168820736, 'Web Development'
).commit()
|
|
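Two details keep the recorded script above from running as committed: simplejson.loads("config.json") parses the literal string "config.json" rather than the file's contents, and the inline comments swallow the commas between the keyword arguments. A working variant would look roughly like the following, assuming config.json holds consumer_key and access_token keys as the original implies; the Pocket constructor and retrieve call are taken from the record itself.

import json

from pocket import Pocket, PocketException

with open('config.json') as fh:
    cfg = json.load(fh)   # parse the file's contents, not the filename string

p = Pocket(
    consumer_key=cfg['consumer_key'],    # comma kept outside the comment
    access_token=cfg['access_token'],
)

try:
    print(p.retrieve(offset=0, count=10))
except PocketException as e:
    print(e)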
f17b28c9f1708d5311e409bf642e6d746715ae2a
|
experiments/run_all.py
|
experiments/run_all.py
|
from pathlib import Path
from subprocess import run
for param_file in Path('.').glob('*.json'):
print(f'Run {param_file.stem}')
run(['python', 'run.py', str(param_file)])
|
Add convenience script to run all experiments
|
Add convenience script to run all experiments
|
Python
|
mit
|
qobilidop/srcnn,qobilidop/srcnn
|
Add convenience script to run all experiments
|
from pathlib import Path
from subprocess import run
for param_file in Path('.').glob('*.json'):
print(f'Run {param_file.stem}')
run(['python', 'run.py', str(param_file)])
|
<commit_before><commit_msg>Add convenience script to run all experiments<commit_after>
|
from pathlib import Path
from subprocess import run
for param_file in Path('.').glob('*.json'):
print(f'Run {param_file.stem}')
run(['python', 'run.py', str(param_file)])
|
Add convenience script to run all experimentsfrom pathlib import Path
from subprocess import run
for param_file in Path('.').glob('*.json'):
print(f'Run {param_file.stem}')
run(['python', 'run.py', str(param_file)])
|
<commit_before><commit_msg>Add convenience script to run all experiments<commit_after>from pathlib import Path
from subprocess import run
for param_file in Path('.').glob('*.json'):
print(f'Run {param_file.stem}')
run(['python', 'run.py', str(param_file)])
|
|
e1dc7eddabc9aa9268fdbb4ecc6922e87ab892f3
|
mkt/webapps/migrations/0004_auto_20151120_0650.py
|
mkt/webapps/migrations/0004_auto_20151120_0650.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import mkt.constants.applications
class Migration(migrations.Migration):
dependencies = [
('webapps', '0003_appfeatures_has_udpsocket'),
]
operations = [
migrations.RemoveField(
model_name='geodata',
name='banner_message',
),
migrations.RemoveField(
model_name='geodata',
name='banner_regions',
),
migrations.AlterField(
model_name='addondevicetype',
name='device_type',
field=models.PositiveIntegerField(default=1, choices=[(1, mkt.constants.applications.DEVICE_DESKTOP), (2, mkt.constants.applications.DEVICE_MOBILE), (3, mkt.constants.applications.DEVICE_TABLET), (4, mkt.constants.applications.DEVICE_GAIA), (5, mkt.constants.applications.DEVICE_TV)]),
preserve_default=True,
),
]
|
Add missing migration for geodata and addondevicetype changes
|
Add missing migration for geodata and addondevicetype changes
|
Python
|
bsd-3-clause
|
washort/zamboni,ingenioustechie/zamboni,mozilla/zamboni,washort/zamboni,diox/zamboni,ddurst/zamboni,ingenioustechie/zamboni,diox/zamboni,mozilla/zamboni,washort/zamboni,jasonthomas/zamboni,ddurst/zamboni,ingenioustechie/zamboni,washort/zamboni,jasonthomas/zamboni,jasonthomas/zamboni,mozilla/zamboni,ddurst/zamboni,mozilla/zamboni,ingenioustechie/zamboni,jasonthomas/zamboni,diox/zamboni,ddurst/zamboni,diox/zamboni
|
Add missing migration for geodata and addondevicetype changes
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import mkt.constants.applications
class Migration(migrations.Migration):
dependencies = [
('webapps', '0003_appfeatures_has_udpsocket'),
]
operations = [
migrations.RemoveField(
model_name='geodata',
name='banner_message',
),
migrations.RemoveField(
model_name='geodata',
name='banner_regions',
),
migrations.AlterField(
model_name='addondevicetype',
name='device_type',
field=models.PositiveIntegerField(default=1, choices=[(1, mkt.constants.applications.DEVICE_DESKTOP), (2, mkt.constants.applications.DEVICE_MOBILE), (3, mkt.constants.applications.DEVICE_TABLET), (4, mkt.constants.applications.DEVICE_GAIA), (5, mkt.constants.applications.DEVICE_TV)]),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add missing migration for geodata and addondevicetype changes<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import mkt.constants.applications
class Migration(migrations.Migration):
dependencies = [
('webapps', '0003_appfeatures_has_udpsocket'),
]
operations = [
migrations.RemoveField(
model_name='geodata',
name='banner_message',
),
migrations.RemoveField(
model_name='geodata',
name='banner_regions',
),
migrations.AlterField(
model_name='addondevicetype',
name='device_type',
field=models.PositiveIntegerField(default=1, choices=[(1, mkt.constants.applications.DEVICE_DESKTOP), (2, mkt.constants.applications.DEVICE_MOBILE), (3, mkt.constants.applications.DEVICE_TABLET), (4, mkt.constants.applications.DEVICE_GAIA), (5, mkt.constants.applications.DEVICE_TV)]),
preserve_default=True,
),
]
|
Add missing migration for geodata and addondevicetype changes# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import mkt.constants.applications
class Migration(migrations.Migration):
dependencies = [
('webapps', '0003_appfeatures_has_udpsocket'),
]
operations = [
migrations.RemoveField(
model_name='geodata',
name='banner_message',
),
migrations.RemoveField(
model_name='geodata',
name='banner_regions',
),
migrations.AlterField(
model_name='addondevicetype',
name='device_type',
field=models.PositiveIntegerField(default=1, choices=[(1, mkt.constants.applications.DEVICE_DESKTOP), (2, mkt.constants.applications.DEVICE_MOBILE), (3, mkt.constants.applications.DEVICE_TABLET), (4, mkt.constants.applications.DEVICE_GAIA), (5, mkt.constants.applications.DEVICE_TV)]),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add missing migration for geodata and addondevicetype changes<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import mkt.constants.applications
class Migration(migrations.Migration):
dependencies = [
('webapps', '0003_appfeatures_has_udpsocket'),
]
operations = [
migrations.RemoveField(
model_name='geodata',
name='banner_message',
),
migrations.RemoveField(
model_name='geodata',
name='banner_regions',
),
migrations.AlterField(
model_name='addondevicetype',
name='device_type',
field=models.PositiveIntegerField(default=1, choices=[(1, mkt.constants.applications.DEVICE_DESKTOP), (2, mkt.constants.applications.DEVICE_MOBILE), (3, mkt.constants.applications.DEVICE_TABLET), (4, mkt.constants.applications.DEVICE_GAIA), (5, mkt.constants.applications.DEVICE_TV)]),
preserve_default=True,
),
]
|
|
2b56ad0b288378a8ccef388605b36308c4ce6483
|
src/ggrc_basic_permissions/migrations/versions/20131211001113_2785a204a673_update_audit_permiss.py
|
src/ggrc_basic_permissions/migrations/versions/20131211001113_2785a204a673_update_audit_permiss.py
|
"""Update audit permissions.
Revision ID: 2785a204a673
Revises: c460b4f8cc3
Create Date: 2013-12-11 00:11:13.431124
"""
# revision identifiers, used by Alembic.
revision = '2785a204a673'
down_revision = 'c460b4f8cc3'
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column, select
import json
roles_table = table('roles',
column('id', sa.Integer),
column('name', sa.String),
column('permissions_json', sa.String)
)
def get_role_permissions(role):
connection = op.get_bind()
role = connection.execute(
select([roles_table.c.permissions_json])\
.where(roles_table.c.name == role)).fetchone()
return json.loads(role.permissions_json)
def update_role_permissions(role, permissions):
op.execute(roles_table\
.update()\
.values(permissions_json = json.dumps(permissions))\
.where(roles_table.c.name == role))
def upgrade():
additional_audit_objects = [
'ObjectControl',
'ObjectDocument',
'ObjectObjective',
'ObjectPerson',
'ObjectSection',
'Relationship',
'Document',
'Meeting',
]
permissions = get_role_permissions('ProgramAuditReader')
permissions['read'].extend(additional_audit_objects)
update_role_permissions('ProgramAuditReader', permissions)
permissions = get_role_permissions('ProgramAuditEditor')
permissions['create'].extend(additional_audit_objects)
permissions['read'].extend(additional_audit_objects)
permissions['update'].extend(additional_audit_objects)
permissions['delete'].extend(additional_audit_objects)
update_role_permissions('ProgramAuditEditor', permissions)
permissions = get_role_permissions('ProgramAuditOwner')
permissions['create'].extend(additional_audit_objects)
permissions['read'].extend(additional_audit_objects)
permissions['update'].extend(additional_audit_objects)
permissions['delete'].extend(additional_audit_objects)
permissions['delete'].extend([
'Audit',
'Request',
'DocumentationResponse',
'InterviewResponse',
'PopulationSampleResponse',
])
update_role_permissions('ProgramAuditOwner', permissions)
def downgrade():
pass
|
Update audit context permissions for the ProgramReader, ProgramEditor, and ProgramOwner roles.
|
Update audit context permissions for the ProgramReader, ProgramEditor,
and ProgramOwner roles.
|
Python
|
apache-2.0
|
VinnieJohns/ggrc-core,kr41/ggrc-core,VinnieJohns/ggrc-core,hasanalom/ggrc-core,AleksNeStu/ggrc-core,prasannav7/ggrc-core,josthkko/ggrc-core,hyperNURb/ggrc-core,josthkko/ggrc-core,prasannav7/ggrc-core,hasanalom/ggrc-core,prasannav7/ggrc-core,edofic/ggrc-core,edofic/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,AleksNeStu/ggrc-core,jmakov/ggrc-core,prasannav7/ggrc-core,vladan-m/ggrc-core,hasanalom/ggrc-core,NejcZupec/ggrc-core,VinnieJohns/ggrc-core,jmakov/ggrc-core,uskudnik/ggrc-core,jmakov/ggrc-core,hyperNURb/ggrc-core,kr41/ggrc-core,hyperNURb/ggrc-core,hyperNURb/ggrc-core,andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,vladan-m/ggrc-core,jmakov/ggrc-core,vladan-m/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,selahssea/ggrc-core,andrei-karalionak/ggrc-core,NejcZupec/ggrc-core,edofic/ggrc-core,hasanalom/ggrc-core,selahssea/ggrc-core,uskudnik/ggrc-core,hasanalom/ggrc-core,NejcZupec/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,vladan-m/ggrc-core,VinnieJohns/ggrc-core,NejcZupec/ggrc-core,edofic/ggrc-core,plamut/ggrc-core,j0gurt/ggrc-core,uskudnik/ggrc-core,j0gurt/ggrc-core,hyperNURb/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,plamut/ggrc-core,uskudnik/ggrc-core,jmakov/ggrc-core,kr41/ggrc-core,vladan-m/ggrc-core,uskudnik/ggrc-core,j0gurt/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core
|
Update audit context permissions for the ProgramReader, ProgramEditor,
and ProgramOwner roles.
|
"""Update audit permissions.
Revision ID: 2785a204a673
Revises: c460b4f8cc3
Create Date: 2013-12-11 00:11:13.431124
"""
# revision identifiers, used by Alembic.
revision = '2785a204a673'
down_revision = 'c460b4f8cc3'
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column, select
import json
roles_table = table('roles',
column('id', sa.Integer),
column('name', sa.String),
column('permissions_json', sa.String)
)
def get_role_permissions(role):
connection = op.get_bind()
role = connection.execute(
select([roles_table.c.permissions_json])\
.where(roles_table.c.name == role)).fetchone()
return json.loads(role.permissions_json)
def update_role_permissions(role, permissions):
op.execute(roles_table\
.update()\
.values(permissions_json = json.dumps(permissions))\
.where(roles_table.c.name == role))
def upgrade():
additional_audit_objects = [
'ObjectControl',
'ObjectDocument',
'ObjectObjective',
'ObjectPerson',
'ObjectSection',
'Relationship',
'Document',
'Meeting',
]
permissions = get_role_permissions('ProgramAuditReader')
permissions['read'].extend(additional_audit_objects)
update_role_permissions('ProgramAuditReader', permissions)
permissions = get_role_permissions('ProgramAuditEditor')
permissions['create'].extend(additional_audit_objects)
permissions['read'].extend(additional_audit_objects)
permissions['update'].extend(additional_audit_objects)
permissions['delete'].extend(additional_audit_objects)
update_role_permissions('ProgramAuditEditor', permissions)
permissions = get_role_permissions('ProgramAuditOwner')
permissions['create'].extend(additional_audit_objects)
permissions['read'].extend(additional_audit_objects)
permissions['update'].extend(additional_audit_objects)
permissions['delete'].extend(additional_audit_objects)
permissions['delete'].extend([
'Audit',
'Request',
'DocumentationResponse',
'InterviewResponse',
'PopulationSampleResponse',
])
update_role_permissions('ProgramAuditOwner', permissions)
def downgrade():
pass
|
<commit_before><commit_msg>Update audit context permissions for the ProgramReader, ProgramEditor,
and ProgramOwner roles.<commit_after>
|
"""Update audit permissions.
Revision ID: 2785a204a673
Revises: c460b4f8cc3
Create Date: 2013-12-11 00:11:13.431124
"""
# revision identifiers, used by Alembic.
revision = '2785a204a673'
down_revision = 'c460b4f8cc3'
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column, select
import json
roles_table = table('roles',
column('id', sa.Integer),
column('name', sa.String),
column('permissions_json', sa.String)
)
def get_role_permissions(role):
connection = op.get_bind()
role = connection.execute(
select([roles_table.c.permissions_json])\
.where(roles_table.c.name == role)).fetchone()
return json.loads(role.permissions_json)
def update_role_permissions(role, permissions):
op.execute(roles_table\
.update()\
.values(permissions_json = json.dumps(permissions))\
.where(roles_table.c.name == role))
def upgrade():
additional_audit_objects = [
'ObjectControl',
'ObjectDocument',
'ObjectObjective',
'ObjectPerson',
'ObjectSection',
'Relationship',
'Document',
'Meeting',
]
permissions = get_role_permissions('ProgramAuditReader')
permissions['read'].extend(additional_audit_objects)
update_role_permissions('ProgramAuditReader', permissions)
permissions = get_role_permissions('ProgramAuditEditor')
permissions['create'].extend(additional_audit_objects)
permissions['read'].extend(additional_audit_objects)
permissions['update'].extend(additional_audit_objects)
permissions['delete'].extend(additional_audit_objects)
update_role_permissions('ProgramAuditEditor', permissions)
permissions = get_role_permissions('ProgramAuditOwner')
permissions['create'].extend(additional_audit_objects)
permissions['read'].extend(additional_audit_objects)
permissions['update'].extend(additional_audit_objects)
permissions['delete'].extend(additional_audit_objects)
permissions['delete'].extend([
'Audit',
'Request',
'DocumentationResponse',
'InterviewResponse',
'PopulationSampleResponse',
])
update_role_permissions('ProgramAuditOwner', permissions)
def downgrade():
pass
|
Update audit context permissions for the ProgramReader, ProgramEditor,
and ProgramOwner roles.
"""Update audit permissions.
Revision ID: 2785a204a673
Revises: c460b4f8cc3
Create Date: 2013-12-11 00:11:13.431124
"""
# revision identifiers, used by Alembic.
revision = '2785a204a673'
down_revision = 'c460b4f8cc3'
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column, select
import json
roles_table = table('roles',
column('id', sa.Integer),
column('name', sa.String),
column('permissions_json', sa.String)
)
def get_role_permissions(role):
connection = op.get_bind()
role = connection.execute(
select([roles_table.c.permissions_json])\
.where(roles_table.c.name == role)).fetchone()
return json.loads(role.permissions_json)
def update_role_permissions(role, permissions):
op.execute(roles_table\
.update()\
.values(permissions_json = json.dumps(permissions))\
.where(roles_table.c.name == role))
def upgrade():
additional_audit_objects = [
'ObjectControl',
'ObjectDocument',
'ObjectObjective',
'ObjectPerson',
'ObjectSection',
'Relationship',
'Document',
'Meeting',
]
permissions = get_role_permissions('ProgramAuditReader')
permissions['read'].extend(additional_audit_objects)
update_role_permissions('ProgramAuditReader', permissions)
permissions = get_role_permissions('ProgramAuditEditor')
permissions['create'].extend(additional_audit_objects)
permissions['read'].extend(additional_audit_objects)
permissions['update'].extend(additional_audit_objects)
permissions['delete'].extend(additional_audit_objects)
update_role_permissions('ProgramAuditEditor', permissions)
permissions = get_role_permissions('ProgramAuditOwner')
permissions['create'].extend(additional_audit_objects)
permissions['read'].extend(additional_audit_objects)
permissions['update'].extend(additional_audit_objects)
permissions['delete'].extend(additional_audit_objects)
permissions['delete'].extend([
'Audit',
'Request',
'DocumentationResponse',
'InterviewResponse',
'PopulationSampleResponse',
])
update_role_permissions('ProgramAuditOwner', permissions)
def downgrade():
pass
|
<commit_before><commit_msg>Update audit context permissions for the ProgramReader, ProgramEditor,
and ProgramOwner roles.<commit_after>
"""Update audit permissions.
Revision ID: 2785a204a673
Revises: c460b4f8cc3
Create Date: 2013-12-11 00:11:13.431124
"""
# revision identifiers, used by Alembic.
revision = '2785a204a673'
down_revision = 'c460b4f8cc3'
import sqlalchemy as sa
from alembic import op
from datetime import datetime
from sqlalchemy.sql import table, column, select
import json
roles_table = table('roles',
column('id', sa.Integer),
column('name', sa.String),
column('permissions_json', sa.String)
)
def get_role_permissions(role):
connection = op.get_bind()
role = connection.execute(
select([roles_table.c.permissions_json])\
.where(roles_table.c.name == role)).fetchone()
return json.loads(role.permissions_json)
def update_role_permissions(role, permissions):
op.execute(roles_table\
.update()\
.values(permissions_json = json.dumps(permissions))\
.where(roles_table.c.name == role))
def upgrade():
additional_audit_objects = [
'ObjectControl',
'ObjectDocument',
'ObjectObjective',
'ObjectPerson',
'ObjectSection',
'Relationship',
'Document',
'Meeting',
]
permissions = get_role_permissions('ProgramAuditReader')
permissions['read'].extend(additional_audit_objects)
update_role_permissions('ProgramAuditReader', permissions)
permissions = get_role_permissions('ProgramAuditEditor')
permissions['create'].extend(additional_audit_objects)
permissions['read'].extend(additional_audit_objects)
permissions['update'].extend(additional_audit_objects)
permissions['delete'].extend(additional_audit_objects)
update_role_permissions('ProgramAuditEditor', permissions)
permissions = get_role_permissions('ProgramAuditOwner')
permissions['create'].extend(additional_audit_objects)
permissions['read'].extend(additional_audit_objects)
permissions['update'].extend(additional_audit_objects)
permissions['delete'].extend(additional_audit_objects)
permissions['delete'].extend([
'Audit',
'Request',
'DocumentationResponse',
'InterviewResponse',
'PopulationSampleResponse',
])
update_role_permissions('ProgramAuditOwner', permissions)
def downgrade():
pass
|
|
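One detail worth flagging in the permissions migration above is that downgrade() is a no-op, so rolling back leaves the extra object types in the role JSON. If a reversible migration were wanted, a symmetric downgrade could reuse the same helpers to strip what upgrade() added. The sketch below is a possible drop-in replacement for that function inside the same module, and it assumes the upgrade lists are the only source of those entries (any hand-added duplicates would be removed as well).

def downgrade():
    added = [
        'ObjectControl', 'ObjectDocument', 'ObjectObjective', 'ObjectPerson',
        'ObjectSection', 'Relationship', 'Document', 'Meeting',
    ]
    owner_only = [
        'Audit', 'Request', 'DocumentationResponse',
        'InterviewResponse', 'PopulationSampleResponse',
    ]
    targets = [
        ('ProgramAuditReader', ['read']),
        ('ProgramAuditEditor', ['create', 'read', 'update', 'delete']),
        ('ProgramAuditOwner', ['create', 'read', 'update', 'delete']),
    ]
    for role, actions in targets:
        permissions = get_role_permissions(role)
        for action in actions:
            # Strip only the object types that upgrade() appended.
            permissions[action] = [
                p for p in permissions[action] if p not in added]
        if role == 'ProgramAuditOwner':
            permissions['delete'] = [
                p for p in permissions['delete'] if p not in owner_only]
        update_role_permissions(role, permissions)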
537fc0f0788a9fb1a3f29a6bab5fc840b90f77da
|
src/tests/behave/api/features/steps/rpc_capabilities.py
|
src/tests/behave/api/features/steps/rpc_capabilities.py
|
import json
import time
import operator
from functools import reduce
from behave import given, then, when, step, use_step_matcher
from src.tests.behave.api.features.common.utils import read_json_from_file
@then(
"the first {service_name} task for the {device_service} device has the output contained in {out_filename}"
)
def step_the_first_task_has_output_in_file(
context, service_name, out_filename, device_service
):
"""
:type context: behave.runner.Context
:type service_name: str
:type out_filename: str
"""
service_client = context.services[service_name]["client"]
device_client = context.services[device_service]["client"]
location = context.json_location
data = read_json_from_file(out_filename, location)
stdout = data["stdout"]
returncode = data["returncode"]
stderr = data["stderr"]
device_id = context.services[device_service]["id"]
if "_sub_" in stdout:
prefix, suffix = stdout.split("_sub_")
device_resp = device_client.get(device_id)
keys = prefix.split(".")
actual_value = reduce(operator.getitem, keys, device_resp.json())
stdout = actual_value + suffix
tasks = context.services[service_name]["tasks"]
first_task = tasks[0]
task_resp = service_client.get(resource_id=first_task["task_id"])
task_message = task_resp.json()["message"]
context.check.assertEqual(
task_message["stdout"],
stdout,
msg="Stdout was {}, should be {}".format(
task_message["stdout"], stdout
),
)
context.check.assertEqual(
task_message["returncode"],
returncode,
msg="Returncode was {}, should be {}".format(
task_message["returncode"], returncode
),
)
context.check.assertEqual(
task_message["stderr"],
stderr,
msg="Stderr was {}, should be {}".format(
task_message["stderr"], stderr
),
)
|
Add a step to make sure that task output matches expected output from a json file, including when the output is dynamic
|
Add a step to make sure that task output matches expected output from a json file, including when the output is dynamic
|
Python
|
apache-2.0
|
jr0d/mercury,jr0d/mercury
|
Add a step to make sure that task output matches expected output from a json file, including when the output is dynamic
|
import json
import time
import operator
from functools import reduce
from behave import given, then, when, step, use_step_matcher
from src.tests.behave.api.features.common.utils import read_json_from_file
@then(
"the first {service_name} task for the {device_service} device has the output contained in {out_filename}"
)
def step_the_first_task_has_output_in_file(
context, service_name, out_filename, device_service
):
"""
:type context: behave.runner.Context
:type service_name: str
:type out_filename: str
"""
service_client = context.services[service_name]["client"]
device_client = context.services[device_service]["client"]
location = context.json_location
data = read_json_from_file(out_filename, location)
stdout = data["stdout"]
returncode = data["returncode"]
stderr = data["stderr"]
device_id = context.services[device_service]["id"]
if "_sub_" in stdout:
prefix, suffix = stdout.split("_sub_")
device_resp = device_client.get(device_id)
keys = prefix.split(".")
actual_value = reduce(operator.getitem, keys, device_resp.json())
stdout = actual_value + suffix
tasks = context.services[service_name]["tasks"]
first_task = tasks[0]
task_resp = service_client.get(resource_id=first_task["task_id"])
task_message = task_resp.json()["message"]
context.check.assertEqual(
task_message["stdout"],
stdout,
msg="Stdout was {}, should be {}".format(
task_message["stdout"], stdout
),
)
context.check.assertEqual(
task_message["returncode"],
returncode,
msg="Returncode was {}, should be {}".format(
task_message["returncode"], returncode
),
)
context.check.assertEqual(
task_message["stderr"],
stderr,
msg="Stderr was {}, should be {}".format(
task_message["stderr"], stderr
),
)
|
<commit_before><commit_msg>Add a step to make sure that task output matches expected output from a json file, including when the output is dynamic<commit_after>
|
import json
import time
import operator
from functools import reduce
from behave import given, then, when, step, use_step_matcher
from src.tests.behave.api.features.common.utils import read_json_from_file
@then(
"the first {service_name} task for the {device_service} device has the output contained in {out_filename}"
)
def step_the_first_task_has_output_in_file(
context, service_name, out_filename, device_service
):
"""
:type context: behave.runner.Context
:type service_name: str
:type out_filename: str
"""
service_client = context.services[service_name]["client"]
device_client = context.services[device_service]["client"]
location = context.json_location
data = read_json_from_file(out_filename, location)
stdout = data["stdout"]
returncode = data["returncode"]
stderr = data["stderr"]
device_id = context.services[device_service]["id"]
if "_sub_" in stdout:
prefix, suffix = stdout.split("_sub_")
device_resp = device_client.get(device_id)
keys = prefix.split(".")
actual_value = reduce(operator.getitem, keys, device_resp.json())
stdout = actual_value + suffix
tasks = context.services[service_name]["tasks"]
first_task = tasks[0]
task_resp = service_client.get(resource_id=first_task["task_id"])
task_message = task_resp.json()["message"]
context.check.assertEqual(
task_message["stdout"],
stdout,
msg="Stdout was {}, should be {}".format(
task_message["stdout"], stdout
),
)
context.check.assertEqual(
task_message["returncode"],
returncode,
msg="Returncode was {}, should be {}".format(
task_message["returncode"], returncode
),
)
context.check.assertEqual(
task_message["stderr"],
stderr,
msg="Stderr was {}, should be {}".format(
task_message["stderr"], stderr
),
)
|
Add a step to make sure that task output matches expected output from a json file, including when the output is dynamicimport json
import time
import operator
from functools import reduce
from behave import given, then, when, step, use_step_matcher
from src.tests.behave.api.features.common.utils import read_json_from_file
@then(
"the first {service_name} task for the {device_service} device has the output contained in {out_filename}"
)
def step_the_first_task_has_output_in_file(
context, service_name, out_filename, device_service
):
"""
:type context: behave.runner.Context
:type service_name: str
:type out_filename: str
"""
service_client = context.services[service_name]["client"]
device_client = context.services[device_service]["client"]
location = context.json_location
data = read_json_from_file(out_filename, location)
stdout = data["stdout"]
returncode = data["returncode"]
stderr = data["stderr"]
device_id = context.services[device_service]["id"]
if "_sub_" in stdout:
prefix, suffix = stdout.split("_sub_")
device_resp = device_client.get(device_id)
keys = prefix.split(".")
actual_value = reduce(operator.getitem, keys, device_resp.json())
stdout = actual_value + suffix
tasks = context.services[service_name]["tasks"]
first_task = tasks[0]
task_resp = service_client.get(resource_id=first_task["task_id"])
task_message = task_resp.json()["message"]
context.check.assertEqual(
task_message["stdout"],
stdout,
msg="Stdout was {}, should be {}".format(
task_message["stdout"], stdout
),
)
context.check.assertEqual(
task_message["returncode"],
returncode,
msg="Returncode was {}, should be {}".format(
task_message["returncode"], returncode
),
)
context.check.assertEqual(
task_message["stderr"],
stderr,
msg="Stderr was {}, should be {}".format(
task_message["stderr"], stderr
),
)
|
<commit_before><commit_msg>Add a step to make sure that task output matches expected output from a json file, including when the output is dynamic<commit_after>import json
import time
import operator
from functools import reduce
from behave import given, then, when, step, use_step_matcher
from src.tests.behave.api.features.common.utils import read_json_from_file
@then(
"the first {service_name} task for the {device_service} device has the output contained in {out_filename}"
)
def step_the_first_task_has_output_in_file(
context, service_name, out_filename, device_service
):
"""
:type context: behave.runner.Context
:type service_name: str
:type out_filename: str
"""
service_client = context.services[service_name]["client"]
device_client = context.services[device_service]["client"]
location = context.json_location
data = read_json_from_file(out_filename, location)
stdout = data["stdout"]
returncode = data["returncode"]
stderr = data["stderr"]
device_id = context.services[device_service]["id"]
if "_sub_" in stdout:
prefix, suffix = stdout.split("_sub_")
device_resp = device_client.get(device_id)
keys = prefix.split(".")
actual_value = reduce(operator.getitem, keys, device_resp.json())
stdout = actual_value + suffix
tasks = context.services[service_name]["tasks"]
first_task = tasks[0]
task_resp = service_client.get(resource_id=first_task["task_id"])
task_message = task_resp.json()["message"]
context.check.assertEqual(
task_message["stdout"],
stdout,
msg="Stdout was {}, should be {}".format(
task_message["stdout"], stdout
),
)
context.check.assertEqual(
task_message["returncode"],
returncode,
msg="Returncode was {}, should be {}".format(
task_message["returncode"], returncode
),
)
context.check.assertEqual(
task_message["stderr"],
stderr,
msg="Stderr was {}, should be {}".format(
task_message["stderr"], stderr
),
)
|
|
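The step above compares a task's stdout, returncode and stderr against a JSON fixture, and supports a _sub_ marker so part of the expected stdout can be resolved from the device document at run time. A hedged example of such a fixture, built from Python so it stays self-contained (the file name and the facts.hostname key are invented for illustration; real fixtures use whatever keys the device API actually returns):
import json
# "facts.hostname_sub_\n" is split at "_sub_": the prefix becomes the dotted lookup
# path walked by reduce(operator.getitem, ...), the suffix is appended literally.
fixture = {
    "stdout": "facts.hostname_sub_\n",
    "returncode": 0,
    "stderr": ""
}
with open("expected_task_output.json", "w") as fh:
    json.dump(fixture, fh)
With that fixture the step resolves device_resp.json()["facts"]["hostname"], appends the trailing newline, and only then compares against the stdout reported by the task.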
280faf026f4950e0eea8e16bb785d36e30c7bad4
|
src/ggrc/migrations/versions/20131116235030_18e1e2aec298_add_options.py
|
src/ggrc/migrations/versions/20131116235030_18e1e2aec298_add_options.py
|
"""Add options
Revision ID: 18e1e2aec298
Revises: 37c5ca51ad36
Create Date: 2013-11-16 23:50:30.637169
"""
# revision identifiers, used by Alembic.
revision = '18e1e2aec298'
down_revision = '37c5ca51ad36'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
import datetime
options_table = table('options',
column('id', sa.Integer),
column('role', sa.String),
column('title', sa.Text),
column('description', sa.Text),
column('modified_by_id', sa.Integer),
column('created_at', sa.DateTime),
column('updated_at', sa.DateTime),
column('required', sa.Boolean),
column('context_id', sa.Integer),
)
options_values = [
{ 'role': 'network_zone', 'title': '3rd Party' },
{ 'role': 'network_zone', 'title': 'Core' },
{ 'role': 'network_zone', 'title': 'Service' },
]
changed_options_values = [
{ 'role': 'verify_frequency', 'old_title': 'Ad-Hoc', 'title': 'Bi-Weekly' },
{ 'role': 'verify_frequency', 'old_title': 'Bi-Annual', 'title': 'Bi-Monthly' },
{ 'role': 'verify_frequency', 'old_title': 'Semi-Annual', 'title': 'Semi-Annually' },
{ 'role': 'verify_frequency', 'old_title': 'Annual', 'title': 'Yearly' },
]
def upgrade():
timestamp = datetime.datetime.now()
connection = op.get_bind()
for i, row in enumerate(options_values, start = 1):
row = dict(row)
connection.execute(options_table.insert().values(row))
for i, row in enumerate(changed_options_values):
row = dict(row)
connection.execute(
options_table.update().\
where(
sa.and_(
options_table.c.role == row['role'],
options_table.c.title == row['old_title']
)
).\
values({ 'title': row['title'] }))
def downgrade():
connection = op.get_bind()
for i, row in enumerate(options_values, start = 1):
row = dict(row)
connection.execute(options_table.delete().where(
sa.and_(
options_table.c.role == row['role'],
options_table.c.title == row['title']
)
))
for i, row in enumerate(changed_options_values):
row = dict(row)
connection.execute(
options_table.update().\
where(
sa.and_(
options_table.c.role == row['role'],
options_table.c.title == row['title']
)
).\
values({ 'title': row['old_title'] }))
|
Add migration to create and update Option rows
|
Add migration to create and update Option rows
|
Python
|
apache-2.0
|
uskudnik/ggrc-core,uskudnik/ggrc-core,uskudnik/ggrc-core,jmakov/ggrc-core,j0gurt/ggrc-core,hyperNURb/ggrc-core,hasanalom/ggrc-core,andrei-karalionak/ggrc-core,AleksNeStu/ggrc-core,NejcZupec/ggrc-core,kr41/ggrc-core,hyperNURb/ggrc-core,edofic/ggrc-core,josthkko/ggrc-core,selahssea/ggrc-core,kr41/ggrc-core,prasannav7/ggrc-core,josthkko/ggrc-core,jmakov/ggrc-core,vladan-m/ggrc-core,edofic/ggrc-core,hasanalom/ggrc-core,prasannav7/ggrc-core,AleksNeStu/ggrc-core,vladan-m/ggrc-core,edofic/ggrc-core,vladan-m/ggrc-core,jmakov/ggrc-core,VinnieJohns/ggrc-core,prasannav7/ggrc-core,hasanalom/ggrc-core,vladan-m/ggrc-core,selahssea/ggrc-core,kr41/ggrc-core,edofic/ggrc-core,kr41/ggrc-core,andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,NejcZupec/ggrc-core,josthkko/ggrc-core,jmakov/ggrc-core,hyperNURb/ggrc-core,uskudnik/ggrc-core,hyperNURb/ggrc-core,NejcZupec/ggrc-core,jmakov/ggrc-core,plamut/ggrc-core,hasanalom/ggrc-core,selahssea/ggrc-core,j0gurt/ggrc-core,vladan-m/ggrc-core,prasannav7/ggrc-core,uskudnik/ggrc-core,hyperNURb/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,andrei-karalionak/ggrc-core,NejcZupec/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,selahssea/ggrc-core,hasanalom/ggrc-core,AleksNeStu/ggrc-core,j0gurt/ggrc-core,plamut/ggrc-core,j0gurt/ggrc-core,plamut/ggrc-core
|
Add migration to create and update Option rows
|
"""Add options
Revision ID: 18e1e2aec298
Revises: 37c5ca51ad36
Create Date: 2013-11-16 23:50:30.637169
"""
# revision identifiers, used by Alembic.
revision = '18e1e2aec298'
down_revision = '37c5ca51ad36'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
import datetime
options_table = table('options',
column('id', sa.Integer),
column('role', sa.String),
column('title', sa.Text),
column('description', sa.Text),
column('modified_by_id', sa.Integer),
column('created_at', sa.DateTime),
column('updated_at', sa.DateTime),
column('required', sa.Boolean),
column('context_id', sa.Integer),
)
options_values = [
{ 'role': 'network_zone', 'title': '3rd Party' },
{ 'role': 'network_zone', 'title': 'Core' },
{ 'role': 'network_zone', 'title': 'Service' },
]
changed_options_values = [
{ 'role': 'verify_frequency', 'old_title': 'Ad-Hoc', 'title': 'Bi-Weekly' },
{ 'role': 'verify_frequency', 'old_title': 'Bi-Annual', 'title': 'Bi-Monthly' },
{ 'role': 'verify_frequency', 'old_title': 'Semi-Annual', 'title': 'Semi-Annually' },
{ 'role': 'verify_frequency', 'old_title': 'Annual', 'title': 'Yearly' },
]
def upgrade():
timestamp = datetime.datetime.now()
connection = op.get_bind()
for i, row in enumerate(options_values, start = 1):
row = dict(row)
connection.execute(options_table.insert().values(row))
for i, row in enumerate(changed_options_values):
row = dict(row)
connection.execute(
options_table.update().\
where(
sa.and_(
options_table.c.role == row['role'],
options_table.c.title == row['old_title']
)
).\
values({ 'title': row['title'] }))
def downgrade():
connection = op.get_bind()
for i, row in enumerate(options_values, start = 1):
row = dict(row)
connection.execute(options_table.delete().where(
sa.and_(
options_table.c.role == row['role'],
options_table.c.title == row['title']
)
))
for i, row in enumerate(changed_options_values):
row = dict(row)
connection.execute(
options_table.update().\
where(
sa.and_(
options_table.c.role == row['role'],
options_table.c.title == row['title']
)
).\
values({ 'title': row['old_title'] }))
|
<commit_before><commit_msg>Add migration to create and update Option rows<commit_after>
|
"""Add options
Revision ID: 18e1e2aec298
Revises: 37c5ca51ad36
Create Date: 2013-11-16 23:50:30.637169
"""
# revision identifiers, used by Alembic.
revision = '18e1e2aec298'
down_revision = '37c5ca51ad36'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
import datetime
options_table = table('options',
column('id', sa.Integer),
column('role', sa.String),
column('title', sa.Text),
column('description', sa.Text),
column('modified_by_id', sa.Integer),
column('created_at', sa.DateTime),
column('updated_at', sa.DateTime),
column('required', sa.Boolean),
column('context_id', sa.Integer),
)
options_values = [
{ 'role': 'network_zone', 'title': '3rd Party' },
{ 'role': 'network_zone', 'title': 'Core' },
{ 'role': 'network_zone', 'title': 'Service' },
]
changed_options_values = [
{ 'role': 'verify_frequency', 'old_title': 'Ad-Hoc', 'title': 'Bi-Weekly' },
{ 'role': 'verify_frequency', 'old_title': 'Bi-Annual', 'title': 'Bi-Monthly' },
{ 'role': 'verify_frequency', 'old_title': 'Semi-Annual', 'title': 'Semi-Annually' },
{ 'role': 'verify_frequency', 'old_title': 'Annual', 'title': 'Yearly' },
]
def upgrade():
timestamp = datetime.datetime.now()
connection = op.get_bind()
for i, row in enumerate(options_values, start = 1):
row = dict(row)
connection.execute(options_table.insert().values(row))
for i, row in enumerate(changed_options_values):
row = dict(row)
connection.execute(
options_table.update().\
where(
sa.and_(
options_table.c.role == row['role'],
options_table.c.title == row['old_title']
)
).\
values({ 'title': row['title'] }))
def downgrade():
connection = op.get_bind()
for i, row in enumerate(options_values, start = 1):
row = dict(row)
connection.execute(options_table.delete().where(
sa.and_(
options_table.c.role == row['role'],
options_table.c.title == row['title']
)
))
for i, row in enumerate(changed_options_values):
row = dict(row)
connection.execute(
options_table.update().\
where(
sa.and_(
options_table.c.role == row['role'],
options_table.c.title == row['title']
)
).\
values({ 'title': row['old_title'] }))
|
Add migration to create and update Option rows
"""Add options
Revision ID: 18e1e2aec298
Revises: 37c5ca51ad36
Create Date: 2013-11-16 23:50:30.637169
"""
# revision identifiers, used by Alembic.
revision = '18e1e2aec298'
down_revision = '37c5ca51ad36'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
import datetime
options_table = table('options',
column('id', sa.Integer),
column('role', sa.String),
column('title', sa.Text),
column('description', sa.Text),
column('modified_by_id', sa.Integer),
column('created_at', sa.DateTime),
column('updated_at', sa.DateTime),
column('required', sa.Boolean),
column('context_id', sa.Integer),
)
options_values = [
{ 'role': 'network_zone', 'title': '3rd Party' },
{ 'role': 'network_zone', 'title': 'Core' },
{ 'role': 'network_zone', 'title': 'Service' },
]
changed_options_values = [
{ 'role': 'verify_frequency', 'old_title': 'Ad-Hoc', 'title': 'Bi-Weekly' },
{ 'role': 'verify_frequency', 'old_title': 'Bi-Annual', 'title': 'Bi-Monthly' },
{ 'role': 'verify_frequency', 'old_title': 'Semi-Annual', 'title': 'Semi-Annually' },
{ 'role': 'verify_frequency', 'old_title': 'Annual', 'title': 'Yearly' },
]
def upgrade():
timestamp = datetime.datetime.now()
connection = op.get_bind()
for i, row in enumerate(options_values, start = 1):
row = dict(row)
connection.execute(options_table.insert().values(row))
for i, row in enumerate(changed_options_values):
row = dict(row)
connection.execute(
options_table.update().\
where(
sa.and_(
options_table.c.role == row['role'],
options_table.c.title == row['old_title']
)
).\
values({ 'title': row['title'] }))
def downgrade():
connection = op.get_bind()
for i, row in enumerate(options_values, start = 1):
row = dict(row)
connection.execute(options_table.delete().where(
sa.and_(
options_table.c.role == row['role'],
options_table.c.title == row['title']
)
))
for i, row in enumerate(changed_options_values):
row = dict(row)
connection.execute(
options_table.update().\
where(
sa.and_(
options_table.c.role == row['role'],
options_table.c.title == row['title']
)
).\
values({ 'title': row['old_title'] }))
|
<commit_before><commit_msg>Add migration to create and update Option rows<commit_after>
"""Add options
Revision ID: 18e1e2aec298
Revises: 37c5ca51ad36
Create Date: 2013-11-16 23:50:30.637169
"""
# revision identifiers, used by Alembic.
revision = '18e1e2aec298'
down_revision = '37c5ca51ad36'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
import datetime
options_table = table('options',
column('id', sa.Integer),
column('role', sa.String),
column('title', sa.Text),
column('description', sa.Text),
column('modified_by_id', sa.Integer),
column('created_at', sa.DateTime),
column('updated_at', sa.DateTime),
column('required', sa.Boolean),
column('context_id', sa.Integer),
)
options_values = [
{ 'role': 'network_zone', 'title': '3rd Party' },
{ 'role': 'network_zone', 'title': 'Core' },
{ 'role': 'network_zone', 'title': 'Service' },
]
changed_options_values = [
{ 'role': 'verify_frequency', 'old_title': 'Ad-Hoc', 'title': 'Bi-Weekly' },
{ 'role': 'verify_frequency', 'old_title': 'Bi-Annual', 'title': 'Bi-Monthly' },
{ 'role': 'verify_frequency', 'old_title': 'Semi-Annual', 'title': 'Semi-Annually' },
{ 'role': 'verify_frequency', 'old_title': 'Annual', 'title': 'Yearly' },
]
def upgrade():
timestamp = datetime.datetime.now()
connection = op.get_bind()
for i, row in enumerate(options_values, start = 1):
row = dict(row)
connection.execute(options_table.insert().values(row))
for i, row in enumerate(changed_options_values):
row = dict(row)
connection.execute(
options_table.update().\
where(
sa.and_(
options_table.c.role == row['role'],
options_table.c.title == row['old_title']
)
).\
values({ 'title': row['title'] }))
def downgrade():
connection = op.get_bind()
for i, row in enumerate(options_values, start = 1):
row = dict(row)
connection.execute(options_table.delete().where(
sa.and_(
options_table.c.role == row['role'],
options_table.c.title == row['title']
)
))
for i, row in enumerate(changed_options_values):
row = dict(row)
connection.execute(
options_table.update().\
where(
sa.and_(
options_table.c.role == row['role'],
options_table.c.title == row['title']
)
).\
values({ 'title': row['old_title'] }))
|
|
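A side note on the insert loop in upgrade(): it issues one INSERT per row, and both the enumerate counter and the timestamp variable go unused. Alembic's op.bulk_insert covers this pattern in a single call; a sketch of the equivalent insert step, assuming the same options_table definition as above (the update and downgrade logic would stay unchanged):
from alembic import op
def upgrade():
    # Same three rows as options_values, inserted in one statement.
    op.bulk_insert(options_table, [
        {'role': 'network_zone', 'title': '3rd Party'},
        {'role': 'network_zone', 'title': 'Core'},
        {'role': 'network_zone', 'title': 'Service'},
    ])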
86d889edced648192cf21010608075540e5ec868
|
py/longest-continuous-increasing-subsequence.py
|
py/longest-continuous-increasing-subsequence.py
|
class Solution(object):
def findLengthOfLCIS(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
start = 0
prev = None
m = 0
for i, n in enumerate(nums):
if prev is not None:
if n <= prev:
start = i
m = max(m, i - start + 1)
prev = n
return m
|
Add py solution for 674. Longest Continuous Increasing Subsequence
|
Add py solution for 674. Longest Continuous Increasing Subsequence
674. Longest Continuous Increasing Subsequence: https://leetcode.com/problems/longest-continuous-increasing-subsequence/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 674. Longest Continuous Increasing Subsequence
674. Longest Continuous Increasing Subsequence: https://leetcode.com/problems/longest-continuous-increasing-subsequence/
|
class Solution(object):
def findLengthOfLCIS(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
start = 0
prev = None
m = 0
for i, n in enumerate(nums):
if prev is not None:
if n <= prev:
start = i
m = max(m, i - start + 1)
prev = n
return m
|
<commit_before><commit_msg>Add py solution for 674. Longest Continuous Increasing Subsequence
674. Longest Continuous Increasing Subsequence: https://leetcode.com/problems/longest-continuous-increasing-subsequence/<commit_after>
|
class Solution(object):
def findLengthOfLCIS(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
start = 0
prev = None
m = 0
for i, n in enumerate(nums):
if prev is not None:
if n <= prev:
start = i
m = max(m, i - start + 1)
prev = n
return m
|
Add py solution for 674. Longest Continuous Increasing Subsequence
674. Longest Continuous Increasing Subsequence: https://leetcode.com/problems/longest-continuous-increasing-subsequence/class Solution(object):
def findLengthOfLCIS(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
start = 0
prev = None
m = 0
for i, n in enumerate(nums):
if prev is not None:
if n <= prev:
start = i
m = max(m, i - start + 1)
prev = n
return m
|
<commit_before><commit_msg>Add py solution for 674. Longest Continuous Increasing Subsequence
674. Longest Continuous Increasing Subsequence: https://leetcode.com/problems/longest-continuous-increasing-subsequence/<commit_after>class Solution(object):
def findLengthOfLCIS(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
start = 0
prev = None
m = 0
for i, n in enumerate(nums):
if prev is not None:
if n <= prev:
start = i
m = max(m, i - start + 1)
prev = n
return m
|
|
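A quick usage check of the solution above against the problem's published examples, assuming the Solution class from this record is in scope:
sol = Solution()
print(sol.findLengthOfLCIS([1, 3, 5, 4, 7]))   # 3, the run [1, 3, 5]
print(sol.findLengthOfLCIS([2, 2, 2, 2, 2]))   # 1, equal neighbours break a strictly increasing run
print(sol.findLengthOfLCIS([]))                # 0, the loop never runs so m stays 0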
a8a850c290b4b09fbdf439e3e41c1bdf35be7f11
|
python/src/maximumSubarray/testMaximumSubarray.py
|
python/src/maximumSubarray/testMaximumSubarray.py
|
import unittest
from maximumSubarray import Solution
class TestMaximumSubarrayEdgeCases(unittest.TestCase):
def setUp(self):
self.solution = Solution()
def testEmptyArrayReturnsZero(self):
self.assertRaises(ValueError, self.solution.maxSubArray, [])
def testArrayWithSingleEntryReturnsEntry(self):
self.assertEqual(self.solution.maxSubArray([-1,]), -1)
self.assertEqual(self.solution.maxSubArray([0,]), 0)
self.assertEqual(self.solution.maxSubArray([1,]), 1)
class TestMaximumSubarrayWithExamples(unittest.TestCase):
def setUp(self):
self.solution = Solution()
def testArrayWithAllNegativeNumbersReturnsLeastNegative(self):
testArray = [-6, -5, -2, -4, -3]
self.assertEqual(self.solution.maxSubArray(testArray), -2)
def testArrayWithNegativeNumbersAndZeroReturnsZero(self):
testArray = [-6, -5, 0, -4, -3, -2]
self.assertEqual(self.solution.maxSubArray(testArray), 0)
def testProblemExampleReturns6(self):
testArray = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
self.assertEqual(self.solution.maxSubArray(testArray), 6)
if __name__ == '__main__':
unittest.main()
|
Add 5 test cases for maximumSubarray problem.
|
Add 5 test cases for maximumSubarray problem.
|
Python
|
mit
|
TheGhostHuCodes/leetCode
|
Add 5 test cases for maximumSubarray problem.
|
import unittest
from maximumSubarray import Solution
class TestMaximumSubarrayEdgeCases(unittest.TestCase):
def setUp(self):
self.solution = Solution()
def testEmptyArrayReturnsZero(self):
self.assertRaises(ValueError, self.solution.maxSubArray, [])
def testArrayWithSingleEntryReturnsEntry(self):
self.assertEqual(self.solution.maxSubArray([-1,]), -1)
self.assertEqual(self.solution.maxSubArray([0,]), 0)
self.assertEqual(self.solution.maxSubArray([1,]), 1)
class TestMaximumSubarrayWithExamples(unittest.TestCase):
def setUp(self):
self.solution = Solution()
def testArrayWithAllNegativeNumbersReturnsLeastNegative(self):
testArray = [-6, -5, -2, -4, -3]
self.assertEqual(self.solution.maxSubArray(testArray), -2)
def testArrayWithNegativeNumbersAndZeroReturnsZero(self):
testArray = [-6, -5, 0, -4, -3, -2]
self.assertEqual(self.solution.maxSubArray(testArray), 0)
def testProblemExampleReturns6(self):
testArray = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
self.assertEqual(self.solution.maxSubArray(testArray), 6)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add 5 test cases for maximumSubarray problem.<commit_after>
|
import unittest
from maximumSubarray import Solution
class TestMaximumSubarrayEdgeCases(unittest.TestCase):
def setUp(self):
self.solution = Solution()
def testEmptyArrayReturnsZero(self):
self.assertRaises(ValueError, self.solution.maxSubArray, [])
def testArrayWithSingleEntryReturnsEntry(self):
self.assertEqual(self.solution.maxSubArray([-1,]), -1)
self.assertEqual(self.solution.maxSubArray([0,]), 0)
self.assertEqual(self.solution.maxSubArray([1,]), 1)
class TestMaximumSubarrayWithExamples(unittest.TestCase):
def setUp(self):
self.solution = Solution()
def testArrayWithAllNegativeNumbersReturnsLeastNegative(self):
testArray = [-6, -5, -2, -4, -3]
self.assertEqual(self.solution.maxSubArray(testArray), -2)
def testArrayWithNegativeNumbersAndZeroReturnsZero(self):
testArray = [-6, -5, 0, -4, -3, -2]
self.assertEqual(self.solution.maxSubArray(testArray), 0)
def testProblemExampleReturns6(self):
testArray = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
self.assertEqual(self.solution.maxSubArray(testArray), 6)
if __name__ == '__main__':
unittest.main()
|
Add 5 test cases for maximumSubarray problem.import unittest
from maximumSubarray import Solution
class TestMaximumSubarrayEdgeCases(unittest.TestCase):
def setUp(self):
self.solution = Solution()
def testEmptyArrayReturnsZero(self):
self.assertRaises(ValueError, self.solution.maxSubArray, [])
def testArrayWithSingleEntryReturnsEntry(self):
self.assertEqual(self.solution.maxSubArray([-1,]), -1)
self.assertEqual(self.solution.maxSubArray([0,]), 0)
self.assertEqual(self.solution.maxSubArray([1,]), 1)
class TestMaximumSubarrayWithExamples(unittest.TestCase):
def setUp(self):
self.solution = Solution()
def testArrayWithAllNegativeNumbersReturnsLeastNegative(self):
testArray = [-6, -5, -2, -4, -3]
self.assertEqual(self.solution.maxSubArray(testArray), -2)
def testArrayWithNegativeNumbersAndZeroReturnsZero(self):
testArray = [-6, -5, 0, -4, -3, -2]
self.assertEqual(self.solution.maxSubArray(testArray), 0)
def testProblemExampleReturns6(self):
testArray = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
self.assertEqual(self.solution.maxSubArray(testArray), 6)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add 5 test cases for maximumSubarray problem.<commit_after>import unittest
from maximumSubarray import Solution
class TestMaximumSubarrayEdgeCases(unittest.TestCase):
def setUp(self):
self.solution = Solution()
def testEmptyArrayReturnsZero(self):
self.assertRaises(ValueError, self.solution.maxSubArray, [])
def testArrayWithSingleEntryReturnsEntry(self):
self.assertEqual(self.solution.maxSubArray([-1,]), -1)
self.assertEqual(self.solution.maxSubArray([0,]), 0)
self.assertEqual(self.solution.maxSubArray([1,]), 1)
class TestMaximumSubarrayWithExamples(unittest.TestCase):
def setUp(self):
self.solution = Solution()
def testArrayWithAllNegativeNumbersReturnsLeastNegative(self):
testArray = [-6, -5, -2, -4, -3]
self.assertEqual(self.solution.maxSubArray(testArray), -2)
def testArrayWithNegativeNumbersAndZeroReturnsZero(self):
testArray = [-6, -5, 0, -4, -3, -2]
self.assertEqual(self.solution.maxSubArray(testArray), 0)
def testProblemExampleReturns6(self):
testArray = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
self.assertEqual(self.solution.maxSubArray(testArray), 6)
if __name__ == '__main__':
unittest.main()
|
|
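This record contains only the test file; the Solution class under test is not included. Purely as an illustration of the interface the tests expect (raise ValueError on an empty list, otherwise return the maximum subarray sum), here is a minimal Kadane's-algorithm sketch that would pass all five tests; the repository's actual maximumSubarray.py may be written differently:
class Solution(object):
    def maxSubArray(self, nums):
        # Kadane's algorithm: best sum of a subarray ending at each position.
        if not nums:
            raise ValueError("maxSubArray requires a non-empty list")
        best = current = nums[0]
        for n in nums[1:]:
            current = max(n, current + n)
            best = max(best, current)
        return best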
7a7b68b369319b80e7797c5d54b0ab6ba57768c2
|
tools/similarweb/add_all_media_to_similarweb_queue.py
|
tools/similarweb/add_all_media_to_similarweb_queue.py
|
#!/usr/bin/env python3
from mediawords.db import connect_to_db, DatabaseHandler
from mediawords.job.similarweb.update_audience_data import SimilarWebUpdateAudienceDataJob
from mediawords.util.log import create_logger
log = create_logger(__name__)
def add_all_media_to_similarweb_queue(db: DatabaseHandler):
"""Add all media IDs to SimilarWeb's queue."""
log.info("Fetching all media IDs...")
media_ids = db.query("""
SELECT media_id
FROM media
ORDER BY media_id
""").flat()
for media_id in media_ids:
log.info("Adding media ID %d" % media_id)
SimilarWebUpdateAudienceDataJob.add_to_queue(media_id=media_id)
if __name__ == "__main__":
db = connect_to_db()
add_all_media_to_similarweb_queue(db=db)
|
Add script that adds all media IDs to SimilarWeb's queue
|
Add script that adds all media IDs to SimilarWeb's queue
|
Python
|
agpl-3.0
|
berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud
|
Add script that adds all media IDs to SimilarWeb's queue
|
#!/usr/bin/env python3
from mediawords.db import connect_to_db, DatabaseHandler
from mediawords.job.similarweb.update_audience_data import SimilarWebUpdateAudienceDataJob
from mediawords.util.log import create_logger
log = create_logger(__name__)
def add_all_media_to_similarweb_queue(db: DatabaseHandler):
"""Add all media IDs to SimilarWeb's queue."""
log.info("Fetching all media IDs...")
media_ids = db.query("""
SELECT media_id
FROM media
ORDER BY media_id
""").flat()
for media_id in media_ids:
log.info("Adding media ID %d" % media_id)
SimilarWebUpdateAudienceDataJob.add_to_queue(media_id=media_id)
if __name__ == "__main__":
db = connect_to_db()
add_all_media_to_similarweb_queue(db=db)
|
<commit_before><commit_msg>Add script that adds all media IDs to SimilarWeb's queue<commit_after>
|
#!/usr/bin/env python3
from mediawords.db import connect_to_db, DatabaseHandler
from mediawords.job.similarweb.update_audience_data import SimilarWebUpdateAudienceDataJob
from mediawords.util.log import create_logger
log = create_logger(__name__)
def add_all_media_to_similarweb_queue(db: DatabaseHandler):
"""Add all media IDs to SimilarWeb's queue."""
log.info("Fetching all media IDs...")
media_ids = db.query("""
SELECT media_id
FROM media
ORDER BY media_id
""").flat()
for media_id in media_ids:
log.info("Adding media ID %d" % media_id)
SimilarWebUpdateAudienceDataJob.add_to_queue(media_id=media_id)
if __name__ == "__main__":
db = connect_to_db()
add_all_media_to_similarweb_queue(db=db)
|
Add script that adds all media IDs to SimilarWeb's queue#!/usr/bin/env python3
from mediawords.db import connect_to_db, DatabaseHandler
from mediawords.job.similarweb.update_audience_data import SimilarWebUpdateAudienceDataJob
from mediawords.util.log import create_logger
log = create_logger(__name__)
def add_all_media_to_similarweb_queue(db: DatabaseHandler):
"""Add all media IDs to SimilarWeb's queue."""
log.info("Fetching all media IDs...")
media_ids = db.query("""
SELECT media_id
FROM media
ORDER BY media_id
""").flat()
for media_id in media_ids:
log.info("Adding media ID %d" % media_id)
SimilarWebUpdateAudienceDataJob.add_to_queue(media_id=media_id)
if __name__ == "__main__":
db = connect_to_db()
add_all_media_to_similarweb_queue(db=db)
|
<commit_before><commit_msg>Add script that adds all media IDs to SimilarWeb's queue<commit_after>#!/usr/bin/env python3
from mediawords.db import connect_to_db, DatabaseHandler
from mediawords.job.similarweb.update_audience_data import SimilarWebUpdateAudienceDataJob
from mediawords.util.log import create_logger
log = create_logger(__name__)
def add_all_media_to_similarweb_queue(db: DatabaseHandler):
"""Add all media IDs to SimilarWeb's queue."""
log.info("Fetching all media IDs...")
media_ids = db.query("""
SELECT media_id
FROM media
ORDER BY media_id
""").flat()
for media_id in media_ids:
log.info("Adding media ID %d" % media_id)
SimilarWebUpdateAudienceDataJob.add_to_queue(media_id=media_id)
if __name__ == "__main__":
db = connect_to_db()
add_all_media_to_similarweb_queue(db=db)
|
|
dddcec5d7712750617118843915fbae260eb8ffc
|
problem_3.py
|
problem_3.py
|
# Simple function for primeness test
def is_prime(number):
import math
limit = int(math.sqrt(number)) + 1
if number == 2:
return True
if number % 2 == 0:
return False
for i in range(3, limit, 2):
if number % i == 0:
return False
return True
import math
large_number = 600851475143
limit_large_number = int(math.sqrt(large_number))
answer = -1
for i in range(2, limit_large_number):
if large_number % i == 0 and is_prime(i): # short circuit evaluation FTW!
answer = i
print("Result:", answer)
|
Solve problem 3 in Python
|
Solve problem 3 in Python
|
Python
|
mit
|
sirodoht/project-euler,sirodoht/project-euler,sirodoht/project-euler
|
Solve problem 3 in Python
|
# Simple function for primeness test
def is_prime(number):
import math
limit = int(math.sqrt(number)) + 1
if number == 2:
return True
if number % 2 == 0:
return False
for i in range(3, limit, 2):
if number % i == 0:
return False
return True
import math
large_number = 600851475143
limit_large_number = int(math.sqrt(large_number))
answer = -1
for i in range(2, limit_large_number):
if large_number % i == 0 and is_prime(i): # short circuit evaluation FTW!
answer = i
print("Result:", answer)
|
<commit_before><commit_msg>Solve problem 3 in Python<commit_after>
|
# Simple function for primeness test
def is_prime(number):
import math
limit = int(math.sqrt(number)) + 1
if number == 2:
return True
if number % 2 == 0:
return False
for i in range(3, limit, 2):
if number % i == 0:
return False
return True
import math
large_number = 600851475143
limit_large_number = int(math.sqrt(large_number))
answer = -1
for i in range(2, limit_large_number):
if large_number % i == 0 and is_prime(i): # short circuit evaluation FTW!
answer = i
print("Result:", answer)
|
Solve problem 3 in Python# Simple function for primeness test
def is_prime(number):
import math
limit = int(math.sqrt(number)) + 1
if number == 2:
return True
if number % 2 == 0:
return False
for i in range(3, limit, 2):
if number % i == 0:
return False
return True
import math
large_number = 600851475143
limit_large_number = int(math.sqrt(large_number))
answer = -1
for i in range(2, limit_large_number):
if large_number % i == 0 and is_prime(i): # short circuit evaluation FTW!
answer = i
print("Result:", answer)
|
<commit_before><commit_msg>Solve problem 3 in Python<commit_after># Simple function for primeness test
def is_prime(number):
import math
limit = int(math.sqrt(number)) + 1
if number == 2:
return True
if number % 2 == 0:
return False
for i in range(3, limit, 2):
if number % i == 0:
return False
return True
import math
large_number = 600851475143
limit_large_number = int(math.sqrt(large_number))
answer = -1
for i in range(2, limit_large_number):
if large_number % i == 0 and is_prime(i): # short circuit evaluation FTW!
answer = i
print("Result:", answer)
|
|
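The script above trial-divides up to sqrt(N) and runs a separate primality test on every divisor it finds, which gives the correct answer here because the largest prime factor of 600851475143 (6857) lies below the square root. A variant that divides factors out as it goes needs no primality test at all; a sketch:
def largest_prime_factor(n):
    # Strip each factor completely before moving on; whatever survives is prime.
    factor = 2
    largest = 1
    while factor * factor <= n:
        while n % factor == 0:
            largest = factor
            n //= factor
        factor += 1 if factor == 2 else 2  # 2, 3, 5, 7, ... (odd candidates after 2)
    return n if n > 1 else largest
print("Result:", largest_prime_factor(600851475143))  # 6857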
cd8653248ae87e50b9d392a1021dc5e63996d79a
|
distribution/src/main/resources/bin/list-ghe-repos.py
|
distribution/src/main/resources/bin/list-ghe-repos.py
|
#!/usr/bin/env python3.7
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import github3
import sys
githubent = github3.enterprise_login(
username=sys.argv[1],
password=sys.argv[2],
url=sys.argv[3])
for d in ["2014", "2015", "2016", "2017", "2018", "2019"]:
dateFilter = "created:<"+d+" and created:>"+str(int(d)-1)
for r in githubent.search_repositories("is:public and "+dateFilter):
print(d+","+r.clone_url)
|
Add script to list repo URLs from GH enterprise.
|
Add script to list repo URLs from GH enterprise.
|
Python
|
apache-2.0
|
chrismattmann/drat,chrismattmann/drat,chrismattmann/drat,chrismattmann/drat,chrismattmann/drat
|
Add script to list repo URLs from GH enterprise.
|
#!/usr/bin/env python3.7
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import github3
import sys
githubent = github3.enterprise_login(
username=sys.argv[1],
password=sys.argv[2],
url=sys.argv[3])
for d in ["2014", "2015", "2016", "2017", "2018", "2019"]:
dateFilter = "created:<"+d+" and created:>"+str(int(d)-1)
for r in githubent.search_repositories("is:public and "+dateFilter):
print(d+","+r.clone_url)
|
<commit_before><commit_msg>Add script to list repo URLs from GH enterprise.<commit_after>
|
#!/usr/bin/env python3.7
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import github3
import sys
githubent = github3.enterprise_login(
username=sys.argv[1],
password=sys.argv[2],
url=sys.argv[3])
for d in ["2014", "2015", "2016", "2017", "2018", "2019"]:
dateFilter = "created:<"+d+" and created:>"+str(int(d)-1)
for r in githubent.search_repositories("is:public and "+dateFilter):
print(d+","+r.clone_url)
|
Add script to list repo URLs from GH enterprise.#!/usr/bin/env python3.7
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import github3
import sys
githubent = github3.enterprise_login(
username=sys.argv[1],
password=sys.argv[2],
url=sys.argv[3])
for d in ["2014", "2015", "2016", "2017", "2018", "2019"]:
dateFilter = "created:<"+d+" and created:>"+str(int(d)-1)
for r in githubent.search_repositories("is:public and "+dateFilter):
print(d+","+r.clone_url)
|
<commit_before><commit_msg>Add script to list repo URLs from GH enterprise.<commit_after>#!/usr/bin/env python3.7
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import github3
import sys
githubent = github3.enterprise_login(
username=sys.argv[1],
password=sys.argv[2],
url=sys.argv[3])
for d in ["2014", "2015", "2016", "2017", "2018", "2019"]:
dateFilter = "created:<"+d+" and created:>"+str(int(d)-1)
for r in githubent.search_repositories("is:public and "+dateFilter):
print(d+","+r.clone_url)
|
|
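The script reads its GitHub Enterprise username, password and base URL positionally from sys.argv and prints one year,clone_url line per public repository. The only string it builds itself is the per-year date filter; a tiny offline illustration of what that filter looks like (no network access involved):
# Invoked roughly as: python list-ghe-repos.py <username> <password> https://ghe.example.com
for d in ["2014", "2015"]:
    dateFilter = "created:<" + d + " and created:>" + str(int(d) - 1)
    print(dateFilter)
# created:<2014 and created:>2013
# created:<2015 and created:>2014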
acade24d7bc3a460429eea71f694ff5ffbc0a7ac
|
src/python/pullingFromCSV.py
|
src/python/pullingFromCSV.py
|
#!/usr/bin/python
'''
Author : Various Users
Date : 06/11/2015
Description : This will be a a relatively simple document breaking down the city_bash project
'''
##Necessary modules??
import openpyxl
import csv
import os
class CityBash(object):
def __init__(self):
pass
def pull_csv(self):
'''
Pull CSV data into python and seperate the fields by variables
'''
os.chdir("//Documents/GitStuff/Project_Boston_Housing") # Depending on where we'll be pulling this from
wb = load_workbook('initial.xlsx')
print(wb)
def get_geocoded_cordinates(self):
pass
def main():
print("###This should work")
new = CityBash()
new_csv = new.self.pull_csv()
print(new_csv)
#Run the intial pull_csv and
main()
|
Add src/python directory and put Python script in it
|
Add src/python directory and put Python script in it
|
Python
|
mit
|
codeforboston/cornerwise,cityofsomerville/citydash,ejegg/citydash,cityofsomerville/citydash,patilav/citydash,codeforboston/cornerwise,patilav/citydash,ejegg/citydash,wrightgd/citydash,cityofsomerville/cornerwise,cityofsomerville/cornerwise,codeforboston/cornerwise,wrightgd/citydash,ejegg/citydash,patilav/citydash,patilav/citydash,cityofsomerville/citydash,cityofsomerville/cornerwise,cityofsomerville/citydash,wrightgd/citydash,wrightgd/citydash,cityofsomerville/cornerwise,codeforboston/cornerwise,ejegg/citydash
|
Add src/python directory and put Python script in it
|
#!/usr/bin/python
'''
Author : Various Users
Date : 06/11/2015
Description : This will be a a relatively simple document breaking down the city_bash project
'''
##Necessary modules??
import openpyxl
import csv
import os
class CityBash(object):
def __init__(self):
pass
def pull_csv(self):
'''
Pull CSV data into python and seperate the fields by variables
'''
os.chdir("//Documents/GitStuff/Project_Boston_Housing") # Depending on where we'll be pulling this from
wb = load_workbook('initial.xlsx')
print(wb)
def get_geocoded_cordinates(self):
pass
def main():
print("###This should work")
new = CityBash()
new_csv = new.self.pull_csv()
print(new_csv)
#Run the intial pull_csv and
main()
|
<commit_before><commit_msg>Add src/python directory and put Python script in it<commit_after>
|
#!/usr/bin/python
'''
Author : Various Users
Date : 06/11/2015
Description : This will be a a relatively simple document breaking down the city_bash project
'''
##Necessary modules??
import openpyxl
import csv
import os
class CityBash(object):
def __init__(self):
pass
def pull_csv(self):
'''
Pull CSV data into python and seperate the fields by variables
'''
os.chdir("//Documents/GitStuff/Project_Boston_Housing") # Depending on where we'll be pulling this from
wb = load_workbook('initial.xlsx')
print(wb)
def get_geocoded_cordinates(self):
pass
def main():
print("###This should work")
new = CityBash()
new_csv = new.self.pull_csv()
print(new_csv)
#Run the intial pull_csv and
main()
|
Add src/python directory and put Python script in it#!/usr/bin/python
'''
Author : Various Users
Date : 06/11/2015
Description : This will be a a relatively simple document breaking down the city_bash project
'''
##Necessary modules??
import openpyxl
import csv
import os
class CityBash(object):
def __init__(self):
pass
def pull_csv(self):
'''
Pull CSV data into python and seperate the fields by variables
'''
os.chdir("//Documents/GitStuff/Project_Boston_Housing") # Depending on where we'll be pulling this from
wb = load_workbook('initial.xlsx')
print(wb)
def get_geocoded_cordinates(self):
pass
def main():
print("###This should work")
new = CityBash()
new_csv = new.self.pull_csv()
print(new_csv)
#Run the intial pull_csv and
main()
|
<commit_before><commit_msg>Add src/python directory and put Python script in it<commit_after>#!/usr/bin/python
'''
Author : Various Users
Date : 06/11/2015
Description : This will be a a relatively simple document breaking down the city_bash project
'''
##Necessary modules??
import openpyxl
import csv
import os
class CityBash(object):
def __init__(self):
pass
def pull_csv(self):
'''
Pull CSV data into python and seperate the fields by variables
'''
os.chdir("//Documents/GitStuff/Project_Boston_Housing") # Depending on where we'll be pulling this from
wb = load_workbook('initial.xlsx')
print(wb)
def get_geocoded_cordinates(self):
pass
def main():
print("###This should work")
new = CityBash()
new_csv = new.self.pull_csv()
print(new_csv)
#Run the intial pull_csv and
main()
|
|
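As committed, the script cannot run: load_workbook is called without being imported from openpyxl, pull_csv chdirs to a hard-coded path, and main() calls new.self.pull_csv() instead of new.pull_csv(). A corrected sketch that keeps the same skeleton, with the spreadsheet location treated as a caller-supplied assumption rather than a fixed path:
import os
from openpyxl import load_workbook
class CityBash(object):
    def pull_csv(self, directory, filename='initial.xlsx'):
        """Load the spreadsheet from the given directory and return the workbook."""
        os.chdir(directory)
        return load_workbook(filename)
    def get_geocoded_coordinates(self):
        pass
def main():
    city_bash = CityBash()
    # Placeholder directory; point this at wherever initial.xlsx actually lives.
    workbook = city_bash.pull_csv(os.path.expanduser('~/Project_Boston_Housing'))
    print(workbook.sheetnames)
if __name__ == '__main__':
    main()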
390df82feffb236820582a74b269b4cd5be5b6d4
|
st2actions/tests/test_runner_container_service.py
|
st2actions/tests/test_runner_container_service.py
|
try:
import simplejson as json
except ImportError:
import json
import os
from oslo.config import cfg
import unittest2
from st2actions.container.service import RunnerContainerService
class RunnerContainerServiceTest(unittest2.TestCase):
def test_get_entry_point_absolute_path(self):
service = RunnerContainerService(None)
orig_path = cfg.CONF.content.content_packs_base_path
cfg.CONF.content.content_packs_base_path = '/tests/packs'
acutal_path = service.get_entry_point_abs_path(pack='foo', entry_point='/foo/bar.py')
self.assertEquals(acutal_path, '/foo/bar.py', 'Entry point path doesn\'t match.')
cfg.CONF.content.content_packs_base_path = orig_path
def test_get_entry_point_relative_path(self):
service = RunnerContainerService(None)
orig_path = cfg.CONF.content.content_packs_base_path
cfg.CONF.content.content_packs_base_path = '/tests/packs'
acutal_path = service.get_entry_point_abs_path(pack='foo', entry_point='foo/bar.py')
expected_path = os.path.join(cfg.CONF.content.content_packs_base_path, 'foo', 'actions',
'foo/bar.py')
self.assertEquals(acutal_path, expected_path, 'Entry point path doesn\'t match.')
cfg.CONF.content.content_packs_base_path = orig_path
def test_report_result_json(self):
service = RunnerContainerService(None)
result = '["foo", {"bar": ["baz", null, 1.0, 2]}]'
service.report_result(result)
self.assertEquals(json.dumps(service.get_result()), result,
                          'JSON results aren\'t handled right')
|
Add test cases for runner container service.
|
Add test cases for runner container service.
|
Python
|
apache-2.0
|
Plexxi/st2,Plexxi/st2,tonybaloney/st2,pinterb/st2,armab/st2,armab/st2,pixelrebel/st2,pixelrebel/st2,Itxaka/st2,jtopjian/st2,alfasin/st2,grengojbo/st2,pinterb/st2,tonybaloney/st2,Plexxi/st2,emedvedev/st2,nzlosh/st2,lakshmi-kannan/st2,pixelrebel/st2,StackStorm/st2,alfasin/st2,grengojbo/st2,Plexxi/st2,StackStorm/st2,jtopjian/st2,alfasin/st2,dennybaa/st2,Itxaka/st2,punalpatel/st2,punalpatel/st2,lakshmi-kannan/st2,Itxaka/st2,emedvedev/st2,StackStorm/st2,lakshmi-kannan/st2,tonybaloney/st2,emedvedev/st2,armab/st2,punalpatel/st2,nzlosh/st2,peak6/st2,pinterb/st2,dennybaa/st2,peak6/st2,StackStorm/st2,nzlosh/st2,dennybaa/st2,nzlosh/st2,grengojbo/st2,jtopjian/st2,peak6/st2
|
Add test cases for runner container service.
|
try:
import simplejson as json
except ImportError:
import json
import os
from oslo.config import cfg
import unittest2
from st2actions.container.service import RunnerContainerService
class RunnerContainerServiceTest(unittest2.TestCase):
def test_get_entry_point_absolute_path(self):
service = RunnerContainerService(None)
orig_path = cfg.CONF.content.content_packs_base_path
cfg.CONF.content.content_packs_base_path = '/tests/packs'
acutal_path = service.get_entry_point_abs_path(pack='foo', entry_point='/foo/bar.py')
self.assertEquals(acutal_path, '/foo/bar.py', 'Entry point path doesn\'t match.')
cfg.CONF.content.content_packs_base_path = orig_path
def test_get_entry_point_relative_path(self):
service = RunnerContainerService(None)
orig_path = cfg.CONF.content.content_packs_base_path
cfg.CONF.content.content_packs_base_path = '/tests/packs'
acutal_path = service.get_entry_point_abs_path(pack='foo', entry_point='foo/bar.py')
expected_path = os.path.join(cfg.CONF.content.content_packs_base_path, 'foo', 'actions',
'foo/bar.py')
self.assertEquals(acutal_path, expected_path, 'Entry point path doesn\'t match.')
cfg.CONF.content.content_packs_base_path = orig_path
def test_report_result_json(self):
service = RunnerContainerService(None)
result = '["foo", {"bar": ["baz", null, 1.0, 2]}]'
service.report_result(result)
self.assertEquals(json.dumps(service.get_result()), result,
                          'JSON results aren\'t handled right')
|
<commit_before><commit_msg>Add test cases for runner container service.<commit_after>
|
try:
import simplejson as json
except ImportError:
import json
import os
from oslo.config import cfg
import unittest2
from st2actions.container.service import RunnerContainerService
class RunnerContainerServiceTest(unittest2.TestCase):
def test_get_entry_point_absolute_path(self):
service = RunnerContainerService(None)
orig_path = cfg.CONF.content.content_packs_base_path
cfg.CONF.content.content_packs_base_path = '/tests/packs'
acutal_path = service.get_entry_point_abs_path(pack='foo', entry_point='/foo/bar.py')
self.assertEquals(acutal_path, '/foo/bar.py', 'Entry point path doesn\'t match.')
cfg.CONF.content.content_packs_base_path = orig_path
def test_get_entry_point_relative_path(self):
service = RunnerContainerService(None)
orig_path = cfg.CONF.content.content_packs_base_path
cfg.CONF.content.content_packs_base_path = '/tests/packs'
acutal_path = service.get_entry_point_abs_path(pack='foo', entry_point='foo/bar.py')
expected_path = os.path.join(cfg.CONF.content.content_packs_base_path, 'foo', 'actions',
'foo/bar.py')
self.assertEquals(acutal_path, expected_path, 'Entry point path doesn\'t match.')
cfg.CONF.content.content_packs_base_path = orig_path
def test_report_result_json(self):
service = RunnerContainerService(None)
result = '["foo", {"bar": ["baz", null, 1.0, 2]}]'
service.report_result(result)
self.assertEquals(json.dumps(service.get_result()), result,
                          'JSON results aren\'t handled right')
|
Add test cases for runner container service.try:
import simplejson as json
except ImportError:
import json
import os
from oslo.config import cfg
import unittest2
from st2actions.container.service import RunnerContainerService
class RunnerContainerServiceTest(unittest2.TestCase):
def test_get_entry_point_absolute_path(self):
service = RunnerContainerService(None)
orig_path = cfg.CONF.content.content_packs_base_path
cfg.CONF.content.content_packs_base_path = '/tests/packs'
acutal_path = service.get_entry_point_abs_path(pack='foo', entry_point='/foo/bar.py')
self.assertEquals(acutal_path, '/foo/bar.py', 'Entry point path doesn\'t match.')
cfg.CONF.content.content_packs_base_path = orig_path
def test_get_entry_point_relative_path(self):
service = RunnerContainerService(None)
orig_path = cfg.CONF.content.content_packs_base_path
cfg.CONF.content.content_packs_base_path = '/tests/packs'
acutal_path = service.get_entry_point_abs_path(pack='foo', entry_point='foo/bar.py')
expected_path = os.path.join(cfg.CONF.content.content_packs_base_path, 'foo', 'actions',
'foo/bar.py')
self.assertEquals(acutal_path, expected_path, 'Entry point path doesn\'t match.')
cfg.CONF.content.content_packs_base_path = orig_path
def test_report_result_json(self):
service = RunnerContainerService(None)
result = '["foo", {"bar": ["baz", null, 1.0, 2]}]'
service.report_result(result)
self.assertEquals(json.dumps(service.get_result()), result,
                          'JSON results aren\'t handled right')
|
<commit_before><commit_msg>Add test cases for runner container service.<commit_after>try:
import simplejson as json
except ImportError:
import json
import os
from oslo.config import cfg
import unittest2
from st2actions.container.service import RunnerContainerService
class RunnerContainerServiceTest(unittest2.TestCase):
def test_get_entry_point_absolute_path(self):
service = RunnerContainerService(None)
orig_path = cfg.CONF.content.content_packs_base_path
cfg.CONF.content.content_packs_base_path = '/tests/packs'
acutal_path = service.get_entry_point_abs_path(pack='foo', entry_point='/foo/bar.py')
self.assertEquals(acutal_path, '/foo/bar.py', 'Entry point path doesn\'t match.')
cfg.CONF.content.content_packs_base_path = orig_path
def test_get_entry_point_relative_path(self):
service = RunnerContainerService(None)
orig_path = cfg.CONF.content.content_packs_base_path
cfg.CONF.content.content_packs_base_path = '/tests/packs'
acutal_path = service.get_entry_point_abs_path(pack='foo', entry_point='foo/bar.py')
expected_path = os.path.join(cfg.CONF.content.content_packs_base_path, 'foo', 'actions',
'foo/bar.py')
self.assertEquals(acutal_path, expected_path, 'Entry point path doesn\'t match.')
cfg.CONF.content.content_packs_base_path = orig_path
def test_report_result_json(self):
service = RunnerContainerService(None)
result = '["foo", {"bar": ["baz", null, 1.0, 2]}]'
service.report_result(result)
self.assertEquals(json.dumps(service.get_result()), result,
                          'JSON results aren\'t handled right')
|
|
938cd71141285def1403297ab70b3ef949b78b8a
|
tests/commit/test_ci_installation.py
|
tests/commit/test_ci_installation.py
|
from unittest import TestCase
import phi
class TestCIInstallation(TestCase):
def test_detect_tf_torch_jax(self):
backends = phi.detect_backends()
names = [b.name for b in backends]
self.assertIn('PyTorch', names)
self.assertIn('Jax', names)
self.assertIn('TensorFlow', names)
|
Test that all backends detected on GitHub Actions
|
[tests] Test that all backends detected on GitHub Actions
|
Python
|
mit
|
tum-pbs/PhiFlow,tum-pbs/PhiFlow
|
[tests] Test that all backends detected on GitHub Actions
|
from unittest import TestCase
import phi
class TestCIInstallation(TestCase):
def test_detect_tf_torch_jax(self):
backends = phi.detect_backends()
names = [b.name for b in backends]
self.assertIn('PyTorch', names)
self.assertIn('Jax', names)
self.assertIn('TensorFlow', names)
|
<commit_before><commit_msg>[tests] Test that all backends detected on GitHub Actions<commit_after>
|
from unittest import TestCase
import phi
class TestCIInstallation(TestCase):
def test_detect_tf_torch_jax(self):
backends = phi.detect_backends()
names = [b.name for b in backends]
self.assertIn('PyTorch', names)
self.assertIn('Jax', names)
self.assertIn('TensorFlow', names)
|
[tests] Test that all backends detected on GitHub Actionsfrom unittest import TestCase
import phi
class TestCIInstallation(TestCase):
def test_detect_tf_torch_jax(self):
backends = phi.detect_backends()
names = [b.name for b in backends]
self.assertIn('PyTorch', names)
self.assertIn('Jax', names)
self.assertIn('TensorFlow', names)
|
<commit_before><commit_msg>[tests] Test that all backends detected on GitHub Actions<commit_after>from unittest import TestCase
import phi
class TestCIInstallation(TestCase):
def test_detect_tf_torch_jax(self):
backends = phi.detect_backends()
names = [b.name for b in backends]
self.assertIn('PyTorch', names)
self.assertIn('Jax', names)
self.assertIn('TensorFlow', names)
|
|
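The test only asserts that the three ML backends are importable in the CI environment. The same check can be run by hand when debugging a local install, using nothing beyond the detect_backends call the test already exercises:
import phi
# Prints the names of the backends PhiFlow could import on this machine; the CI
# test above expects 'PyTorch', 'Jax' and 'TensorFlow' to be among them.
print(sorted(b.name for b in phi.detect_backends()))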
783795235e5382737893b5b92c3764785f1f35ea
|
local_server.py
|
local_server.py
|
#!/usr/bin/env python
import sys
import SimpleHTTPServer
import SocketServer
PORT = 8000
if len(sys.argv) > 1:
PORT = int(sys.argv[1])
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
httpd = SocketServer.TCPServer(("", PORT), Handler)
print "serving at port", PORT
httpd.serve_forever()
|
Work on refining the demo code to add some markers, etc.
|
Work on refining the demo code to add some markers, etc.
|
Python
|
mit
|
vrsource/vrs.ux.touch,vrsource/vrs.ux.touch,vrsource/vrs.ux.touch,vrsource/vrs.ux.touch
|
Work on refining the demo code to add some markers, etc.
|
#!/usr/bin/env python
import sys
import SimpleHTTPServer
import SocketServer
PORT = 8000
if len(sys.argv) > 1:
PORT = int(sys.argv[1])
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
httpd = SocketServer.TCPServer(("", PORT), Handler)
print "serving at port", PORT
httpd.serve_forever()
|
<commit_before><commit_msg>Work on refining the demo code to add some markers, etc.<commit_after>
|
#!/usr/bin/env python
import sys
import SimpleHTTPServer
import SocketServer
PORT = 8000
if len(sys.argv) > 1:
PORT = int(sys.argv[1])
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
httpd = SocketServer.TCPServer(("", PORT), Handler)
print "serving at port", PORT
httpd.serve_forever()
|
Work on refining the demo code to add some markers, etc.#!/usr/bin/env python
import sys
import SimpleHTTPServer
import SocketServer
PORT = 8000
if len(sys.argv) > 1:
PORT = int(sys.argv[1])
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
httpd = SocketServer.TCPServer(("", PORT), Handler)
print "serving at port", PORT
httpd.serve_forever()
|
<commit_before><commit_msg>Work on refining the demo code to add some markers, etc.<commit_after>#!/usr/bin/env python
import sys
import SimpleHTTPServer
import SocketServer
PORT = 8000
if len(sys.argv) > 1:
PORT = int(sys.argv[1])
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
httpd = SocketServer.TCPServer(("", PORT), Handler)
print "serving at port", PORT
httpd.serve_forever()
|
|
4c46a60458ade26497dda43e704cacf0f77a40af
|
accounts/migrations/0015_create_mit_organization.py
|
accounts/migrations/0015_create_mit_organization.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-31 18:22
from __future__ import unicode_literals
from django.core.management.sql import emit_post_migrate_signal
from django.db import migrations
from accounts.models import Organization
def create_mit_organization(apps, schema_editor, *args, **kwargs):
db_alias = schema_editor.connection.alias
# this creates the permissions prior to the post_save signals
# on organization trying to use them
emit_post_migrate_signal(2, False, db_alias)
Organization.objects.create(
name='MIT',
url='https://lookit.mit.edu'
)
def remove_mit_organization(*args, **kwargs):
Organization.objects.filter(name='MIT').delete()
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '__latest__'),
('sites', '__latest__'),
('accounts', '0014_auto_20170726_1403'),
]
operations = [
migrations.RunPython(create_mit_organization, remove_mit_organization)
]
|
Add MIT organization in a migration
|
Add MIT organization in a migration
|
Python
|
apache-2.0
|
CenterForOpenScience/lookit-api,CenterForOpenScience/lookit-api,pattisdr/lookit-api,pattisdr/lookit-api,CenterForOpenScience/lookit-api,pattisdr/lookit-api
|
Add MIT organization in a migration
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-31 18:22
from __future__ import unicode_literals
from django.core.management.sql import emit_post_migrate_signal
from django.db import migrations
from accounts.models import Organization
def create_mit_organization(apps, schema_editor, *args, **kwargs):
db_alias = schema_editor.connection.alias
# this creates the permissions prior to the post_save signals
# on organization trying to use them
emit_post_migrate_signal(2, False, db_alias)
Organization.objects.create(
name='MIT',
url='https://lookit.mit.edu'
)
def remove_mit_organization(*args, **kwargs):
Organization.objects.filter(name='MIT').delete()
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '__latest__'),
('sites', '__latest__'),
('accounts', '0014_auto_20170726_1403'),
]
operations = [
migrations.RunPython(create_mit_organization, remove_mit_organization)
]
|
<commit_before><commit_msg>Add MIT organization in a migration<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-31 18:22
from __future__ import unicode_literals
from django.core.management.sql import emit_post_migrate_signal
from django.db import migrations
from accounts.models import Organization
def create_mit_organization(apps, schema_editor, *args, **kwargs):
db_alias = schema_editor.connection.alias
# this creates the permissions prior to the post_save signals
# on organization trying to use them
emit_post_migrate_signal(2, False, db_alias)
Organization.objects.create(
name='MIT',
url='https://lookit.mit.edu'
)
def remove_mit_organization(*args, **kwargs):
Organization.objects.filter(name='MIT').delete()
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '__latest__'),
('sites', '__latest__'),
('accounts', '0014_auto_20170726_1403'),
]
operations = [
migrations.RunPython(create_mit_organization, remove_mit_organization)
]
|
Add MIT organization in a migration# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-31 18:22
from __future__ import unicode_literals
from django.core.management.sql import emit_post_migrate_signal
from django.db import migrations
from accounts.models import Organization
def create_mit_organization(apps, schema_editor, *args, **kwargs):
db_alias = schema_editor.connection.alias
# this creates the permissions prior to the post_save signals
# on organization trying to use them
emit_post_migrate_signal(2, False, db_alias)
Organization.objects.create(
name='MIT',
url='https://lookit.mit.edu'
)
def remove_mit_organization(*args, **kwargs):
Organization.objects.filter(name='MIT').delete()
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '__latest__'),
('sites', '__latest__'),
('accounts', '0014_auto_20170726_1403'),
]
operations = [
migrations.RunPython(create_mit_organization, remove_mit_organization)
]
|
<commit_before><commit_msg>Add MIT organization in a migration<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-31 18:22
from __future__ import unicode_literals
from django.core.management.sql import emit_post_migrate_signal
from django.db import migrations
from accounts.models import Organization
def create_mit_organization(apps, schema_editor, *args, **kwargs):
db_alias = schema_editor.connection.alias
# this creates the permissions prior to the post_save signals
# on organization trying to use them
emit_post_migrate_signal(2, False, db_alias)
Organization.objects.create(
name='MIT',
url='https://lookit.mit.edu'
)
def remove_mit_organization(*args, **kwargs):
Organization.objects.filter(name='MIT').delete()
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '__latest__'),
('sites', '__latest__'),
('accounts', '0014_auto_20170726_1403'),
]
operations = [
migrations.RunPython(create_mit_organization, remove_mit_organization)
]
|
|
9b9bc0a737fcaf6ffe0dbcaa9a6c5de9e82720db
|
nova/tests/functional/regressions/test_bug_1620248.py
|
nova/tests/functional/regressions/test_bug_1620248.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import cast_as_call
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
CONF = cfg.CONF
class TestServerUpdate(test.TestCase):
REQUIRES_LOCKING = True
def setUp(self):
super(TestServerUpdate, self).setUp()
self.useFixture(policy_fixture.RealPolicyFixture())
# Simulate requests coming in before the instance is scheduled by
# using a no-op for conductor build_instances
self.useFixture(nova_fixtures.NoopConductorFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.api = api_fixture.api
# the image fake backend needed for image discovery
nova.tests.unit.image.fake.stub_out_image_service(self)
self.useFixture(cast_as_call.CastAsCall(self.stubs))
self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
self.image_id = self.api.get_images()[0]['id']
self.flavor_id = self.api.get_flavors()[0]['id']
def test_update_name_before_scheduled(self):
server = dict(name='server0',
imageRef=self.image_id,
flavorRef=self.flavor_id)
server_id = self.api.post_server({'server': server})['id']
server = {'server': {'name': 'server-renamed'}}
self.api.api_put('/servers/%s' % server_id, server)
server_name = self.api.get_server(server_id)['name']
self.assertEqual('server-renamed', server_name)
|
Add regression test for immediate server name update
|
Add regression test for immediate server name update
This tests a scenario where a user requests an update to the
server before it's scheduled and ensures it succeeds.
Related-Bug: #1620248
Change-Id: I13a9989d7f515ad2356d26b36b65742dbe48d1a8
|
Python
|
apache-2.0
|
openstack/nova,phenoxim/nova,mahak/nova,gooddata/openstack-nova,gooddata/openstack-nova,openstack/nova,hanlind/nova,jianghuaw/nova,rajalokan/nova,vmturbo/nova,rajalokan/nova,jianghuaw/nova,vmturbo/nova,mahak/nova,klmitch/nova,sebrandon1/nova,alaski/nova,openstack/nova,sebrandon1/nova,vmturbo/nova,klmitch/nova,jianghuaw/nova,klmitch/nova,cloudbase/nova,mahak/nova,hanlind/nova,rahulunair/nova,gooddata/openstack-nova,rajalokan/nova,hanlind/nova,Juniper/nova,mikalstill/nova,Juniper/nova,mikalstill/nova,Juniper/nova,cloudbase/nova,cloudbase/nova,sebrandon1/nova,mikalstill/nova,alaski/nova,rajalokan/nova,vmturbo/nova,jianghuaw/nova,gooddata/openstack-nova,klmitch/nova,Juniper/nova,rahulunair/nova,phenoxim/nova,rahulunair/nova
|
Add regression test for immediate server name update
This tests a scenario where a user requests an update to the
server before it's scheduled and ensures it succeeds.
Related-Bug: #1620248
Change-Id: I13a9989d7f515ad2356d26b36b65742dbe48d1a8
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import cast_as_call
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
CONF = cfg.CONF
class TestServerUpdate(test.TestCase):
REQUIRES_LOCKING = True
def setUp(self):
super(TestServerUpdate, self).setUp()
self.useFixture(policy_fixture.RealPolicyFixture())
# Simulate requests coming in before the instance is scheduled by
# using a no-op for conductor build_instances
self.useFixture(nova_fixtures.NoopConductorFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.api = api_fixture.api
# the image fake backend needed for image discovery
nova.tests.unit.image.fake.stub_out_image_service(self)
self.useFixture(cast_as_call.CastAsCall(self.stubs))
self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
self.image_id = self.api.get_images()[0]['id']
self.flavor_id = self.api.get_flavors()[0]['id']
def test_update_name_before_scheduled(self):
server = dict(name='server0',
imageRef=self.image_id,
flavorRef=self.flavor_id)
server_id = self.api.post_server({'server': server})['id']
server = {'server': {'name': 'server-renamed'}}
self.api.api_put('/servers/%s' % server_id, server)
server_name = self.api.get_server(server_id)['name']
self.assertEqual('server-renamed', server_name)
|
<commit_before><commit_msg>Add regression test for immediate server name update
This tests a scenario where a user requests an update to the
server before it's scheduled and ensures it succeeds.
Related-Bug: #1620248
Change-Id: I13a9989d7f515ad2356d26b36b65742dbe48d1a8<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import cast_as_call
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
CONF = cfg.CONF
class TestServerUpdate(test.TestCase):
REQUIRES_LOCKING = True
def setUp(self):
super(TestServerUpdate, self).setUp()
self.useFixture(policy_fixture.RealPolicyFixture())
# Simulate requests coming in before the instance is scheduled by
# using a no-op for conductor build_instances
self.useFixture(nova_fixtures.NoopConductorFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.api = api_fixture.api
# the image fake backend needed for image discovery
nova.tests.unit.image.fake.stub_out_image_service(self)
self.useFixture(cast_as_call.CastAsCall(self.stubs))
self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
self.image_id = self.api.get_images()[0]['id']
self.flavor_id = self.api.get_flavors()[0]['id']
def test_update_name_before_scheduled(self):
server = dict(name='server0',
imageRef=self.image_id,
flavorRef=self.flavor_id)
server_id = self.api.post_server({'server': server})['id']
server = {'server': {'name': 'server-renamed'}}
self.api.api_put('/servers/%s' % server_id, server)
server_name = self.api.get_server(server_id)['name']
self.assertEqual('server-renamed', server_name)
|
Add regression test for immediate server name update
This tests a scenario where a user requests an update to the
server before it's scheduled and ensures it succeeds.
Related-Bug: #1620248
Change-Id: I13a9989d7f515ad2356d26b36b65742dbe48d1a8# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import cast_as_call
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
CONF = cfg.CONF
class TestServerUpdate(test.TestCase):
REQUIRES_LOCKING = True
def setUp(self):
super(TestServerUpdate, self).setUp()
self.useFixture(policy_fixture.RealPolicyFixture())
# Simulate requests coming in before the instance is scheduled by
# using a no-op for conductor build_instances
self.useFixture(nova_fixtures.NoopConductorFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.api = api_fixture.api
# the image fake backend needed for image discovery
nova.tests.unit.image.fake.stub_out_image_service(self)
self.useFixture(cast_as_call.CastAsCall(self.stubs))
self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
self.image_id = self.api.get_images()[0]['id']
self.flavor_id = self.api.get_flavors()[0]['id']
def test_update_name_before_scheduled(self):
server = dict(name='server0',
imageRef=self.image_id,
flavorRef=self.flavor_id)
server_id = self.api.post_server({'server': server})['id']
server = {'server': {'name': 'server-renamed'}}
self.api.api_put('/servers/%s' % server_id, server)
server_name = self.api.get_server(server_id)['name']
self.assertEqual('server-renamed', server_name)
|
<commit_before><commit_msg>Add regression test for immediate server name update
This tests a scenario where a user requests an update to the
server before it's scheduled and ensures it succeeds.
Related-Bug: #1620248
Change-Id: I13a9989d7f515ad2356d26b36b65742dbe48d1a8<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import cast_as_call
import nova.tests.unit.image.fake
from nova.tests.unit import policy_fixture
CONF = cfg.CONF
class TestServerUpdate(test.TestCase):
REQUIRES_LOCKING = True
def setUp(self):
super(TestServerUpdate, self).setUp()
self.useFixture(policy_fixture.RealPolicyFixture())
# Simulate requests coming in before the instance is scheduled by
# using a no-op for conductor build_instances
self.useFixture(nova_fixtures.NoopConductorFixture())
api_fixture = self.useFixture(nova_fixtures.OSAPIFixture(
api_version='v2.1'))
self.api = api_fixture.api
# the image fake backend needed for image discovery
nova.tests.unit.image.fake.stub_out_image_service(self)
self.useFixture(cast_as_call.CastAsCall(self.stubs))
self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
self.image_id = self.api.get_images()[0]['id']
self.flavor_id = self.api.get_flavors()[0]['id']
def test_update_name_before_scheduled(self):
server = dict(name='server0',
imageRef=self.image_id,
flavorRef=self.flavor_id)
server_id = self.api.post_server({'server': server})['id']
server = {'server': {'name': 'server-renamed'}}
self.api.api_put('/servers/%s' % server_id, server)
server_name = self.api.get_server(server_id)['name']
self.assertEqual('server-renamed', server_name)
|
|
df93e3523a94eff62407bb6b326c3ca2a122644c
|
corehq/apps/sms/migrations/0049_auto_enable_turnio_ff.py
|
corehq/apps/sms/migrations/0049_auto_enable_turnio_ff.py
|
# Generated by Django 2.2.24 on 2021-06-10 09:13
from django.db import migrations
from corehq.messaging.smsbackends.turn.models import SQLTurnWhatsAppBackend
from corehq.toggles import TURN_IO_BACKEND
def auto_enable_turnio_ff_for_certain_domains(apps, schema_editor):
for backend in SQLTurnWhatsAppBackend.active_objects.all():
domain = backend.domain
TURN_IO_BACKEND.set(item=domain, enabled=True, namespace='domain')
def noop(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('sms', '0048_delete_sqlicdsbackend'),
]
operations = [
migrations.RunPython(auto_enable_turnio_ff_for_certain_domains, reverse_code=noop),
]
|
Add migration: auto-enable Turn.io for domains using it
|
Add migration: auto-enable Turn.io for domains using it
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add migration: auto-enable Turn.io for domains using it
|
# Generated by Django 2.2.24 on 2021-06-10 09:13
from django.db import migrations
from corehq.messaging.smsbackends.turn.models import SQLTurnWhatsAppBackend
from corehq.toggles import TURN_IO_BACKEND
def auto_enable_turnio_ff_for_certain_domains(apps, schema_editor):
for backend in SQLTurnWhatsAppBackend.active_objects.all():
domain = backend.domain
TURN_IO_BACKEND.set(item=domain, enabled=True, namespace='domain')
def noop(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('sms', '0048_delete_sqlicdsbackend'),
]
operations = [
migrations.RunPython(auto_enable_turnio_ff_for_certain_domains, reverse_code=noop),
]
|
<commit_before><commit_msg>Add migration: auto-enable Turn.io for domains using it<commit_after>
|
# Generated by Django 2.2.24 on 2021-06-10 09:13
from django.db import migrations
from corehq.messaging.smsbackends.turn.models import SQLTurnWhatsAppBackend
from corehq.toggles import TURN_IO_BACKEND
def auto_enable_turnio_ff_for_certain_domains(apps, schema_editor):
for backend in SQLTurnWhatsAppBackend.active_objects.all():
domain = backend.domain
TURN_IO_BACKEND.set(item=domain, enabled=True, namespace='domain')
def noop(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('sms', '0048_delete_sqlicdsbackend'),
]
operations = [
migrations.RunPython(auto_enable_turnio_ff_for_certain_domains, reverse_code=noop),
]
|
Add migration: auto-enable Turn.io for domains using it# Generated by Django 2.2.24 on 2021-06-10 09:13
from django.db import migrations
from corehq.messaging.smsbackends.turn.models import SQLTurnWhatsAppBackend
from corehq.toggles import TURN_IO_BACKEND
def auto_enable_turnio_ff_for_certain_domains(apps, schema_editor):
for backend in SQLTurnWhatsAppBackend.active_objects.all():
domain = backend.domain
TURN_IO_BACKEND.set(item=domain, enabled=True, namespace='domain')
def noop(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('sms', '0048_delete_sqlicdsbackend'),
]
operations = [
migrations.RunPython(auto_enable_turnio_ff_for_certain_domains, reverse_code=noop),
]
|
<commit_before><commit_msg>Add migration: auto-enable Turn.io for domains using it<commit_after># Generated by Django 2.2.24 on 2021-06-10 09:13
from django.db import migrations
from corehq.messaging.smsbackends.turn.models import SQLTurnWhatsAppBackend
from corehq.toggles import TURN_IO_BACKEND
def auto_enable_turnio_ff_for_certain_domains(apps, schema_editor):
for backend in SQLTurnWhatsAppBackend.active_objects.all():
domain = backend.domain
TURN_IO_BACKEND.set(item=domain, enabled=True, namespace='domain')
def noop(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('sms', '0048_delete_sqlicdsbackend'),
]
operations = [
migrations.RunPython(auto_enable_turnio_ff_for_certain_domains, reverse_code=noop),
]
|
|
f1b09e1a810ea781e0e34cdd08938a8011a3dac8
|
test/test_cext.py
|
test/test_cext.py
|
def test_cryptography_path():
"""
Checks that an imported version of cryptography is actually from the
dist/cext folder. We can allow other versions, but for now, we want tests
to fail until we identify the right way to do this.
"""
try:
import cryptography
assert "dist/cext" in cryptography.__file__
except ImportError:
pass
|
Add a test case to see that we are using cryptography from the right source
|
Add a test case to see that we are using cryptography from the right source
|
Python
|
mit
|
indirectlylit/kolibri,mrpau/kolibri,mrpau/kolibri,lyw07/kolibri,learningequality/kolibri,benjaoming/kolibri,benjaoming/kolibri,lyw07/kolibri,indirectlylit/kolibri,mrpau/kolibri,DXCanas/kolibri,indirectlylit/kolibri,lyw07/kolibri,indirectlylit/kolibri,benjaoming/kolibri,mrpau/kolibri,DXCanas/kolibri,learningequality/kolibri,learningequality/kolibri,DXCanas/kolibri,benjaoming/kolibri,learningequality/kolibri,DXCanas/kolibri,lyw07/kolibri
|
Add a test case to see that we are using cryptography from the right source
|
def test_cryptography_path():
"""
Checks that an imported version of cryptography is actually from the
dist/cext folder. We can allow other versions, but for now, we want tests
to fail until we identify the right way to do this.
"""
try:
import cryptography
assert "dist/cext" in cryptography.__file__
except ImportError:
pass
|
<commit_before><commit_msg>Add a test case to see that we are using cryptography from the right source<commit_after>
|
def test_cryptography_path():
"""
Checks that an imported version of cryptography is actually from the
dist/cext folder. We can allow other versions, but for now, we want tests
to fail until we identify the right way to do this.
"""
try:
import cryptography
assert "dist/cext" in cryptography.__file__
except ImportError:
pass
|
Add a test case to see that we are using cryptography from the right source
def test_cryptography_path():
"""
Checks that an imported version of cryptography is actually from the
dist/cext folder. We can allow other versions, but for now, we want tests
to fail until we identify the right way to do this.
"""
try:
import cryptography
assert "dist/cext" in cryptography.__file__
except ImportError:
pass
|
<commit_before><commit_msg>Add a test case to see that we are using cryptography from the right source<commit_after>
def test_cryptography_path():
"""
Checks that an imported version of cryptography is actually from the
dist/cext folder. We can allow other versions, but for now, we want tests
to fail until we identify the right way to do this.
"""
try:
import cryptography
assert "dist/cext" in cryptography.__file__
except ImportError:
pass
|
|
65b93eb73e25f43f0ebe4eab343084ebb297546f
|
education/migrations/0016_repair_missing_eav_values.py
|
education/migrations/0016_repair_missing_eav_values.py
|
# -*- coding: utf-8 -*-
from south.v2 import DataMigration
from education.models import Response
import re
from django.conf import settings
from django.db.transaction import commit_on_success
@commit_on_success
class Migration(DataMigration):
def forwards(self, orm):
# Find responses that don't have a stored numeric value.
numeric_responses = Response.objects.filter(has_errors=False, message__direction='I', poll__type='n')
numeric_responses.query.join(('poll_response', 'eav_value', 'id', 'entity_id'), promote=True)
missing = numeric_responses.extra(where = ["value_float IS NULL"])
# Recalculate the numeric values where we can.
regex = re.compile(r"(-?\d+(\.\d+)?)")
for response in missing:
parts = regex.split(response.message.text)
if len(parts) == 4 :
response.eav.poll_number_value = float(parts[1])
else:
response.has_errors = True
# Make sure we don't insert implausible values.
invalid = getattr(settings, "INVALID_RESPONSE", lambda response: False)
response.has_errors = response.has_errors or invalid(response)
response.save(force_update=True)
|
Repair responses that should have a numeric value stored, but don't, making sure we don't insert implausibly large values.
|
Repair responses that should have a numeric value stored, but don't,
making sure we don't insert implausibly large values.
|
Python
|
bsd-3-clause
|
unicefuganda/edtrac,unicefuganda/edtrac,unicefuganda/edtrac
|
Repair responses that should have a numeric value stored, but don't,
making sure we don't insert implausibly large values.
|
# -*- coding: utf-8 -*-
from south.v2 import DataMigration
from education.models import Response
import re
from django.conf import settings
from django.db.transaction import commit_on_success
@commit_on_success
class Migration(DataMigration):
def forwards(self, orm):
# Find responses that don't have a stored numeric value.
numeric_responses = Response.objects.filter(has_errors=False, message__direction='I', poll__type='n')
numeric_responses.query.join(('poll_response', 'eav_value', 'id', 'entity_id'), promote=True)
missing = numeric_responses.extra(where = ["value_float IS NULL"])
# Recalculate the numeric values where we can.
regex = re.compile(r"(-?\d+(\.\d+)?)")
for response in missing:
parts = regex.split(response.message.text)
if len(parts) == 4 :
response.eav.poll_number_value = float(parts[1])
else:
response.has_errors = True
# Make sure we don't insert implausible values.
invalid = getattr(settings, "INVALID_RESPONSE", lambda response: False)
response.has_errors = response.has_errors or invalid(response)
response.save(force_update=True)
|
<commit_before><commit_msg>Repair responses that should have a numeric value stored, but don't,
making sure we don't insert implausibly large values.<commit_after>
|
# -*- coding: utf-8 -*-
from south.v2 import DataMigration
from education.models import Response
import re
from django.conf import settings
from django.db.transaction import commit_on_success
@commit_on_success
class Migration(DataMigration):
def forwards(self, orm):
# Find responses that don't have a stored numeric value.
numeric_responses = Response.objects.filter(has_errors=False, message__direction='I', poll__type='n')
numeric_responses.query.join(('poll_response', 'eav_value', 'id', 'entity_id'), promote=True)
missing = numeric_responses.extra(where = ["value_float IS NULL"])
# Recalculate the numeric values where we can.
regex = re.compile(r"(-?\d+(\.\d+)?)")
for response in missing:
parts = regex.split(response.message.text)
if len(parts) == 4 :
response.eav.poll_number_value = float(parts[1])
else:
response.has_errors = True
# Make sure we don't insert implausible values.
invalid = getattr(settings, "INVALID_RESPONSE", lambda response: False)
response.has_errors = response.has_errors or invalid(response)
response.save(force_update=True)
|
Repair responses that should have a numeric value stored, but don't,
making sure we don't insert implausibly large values.# -*- coding: utf-8 -*-
from south.v2 import DataMigration
from education.models import Response
import re
from django.conf import settings
from django.db.transaction import commit_on_success
@commit_on_success
class Migration(DataMigration):
def forwards(self, orm):
# Find responses that don't have a stored numeric value.
numeric_responses = Response.objects.filter(has_errors=False, message__direction='I', poll__type='n')
numeric_responses.query.join(('poll_response', 'eav_value', 'id', 'entity_id'), promote=True)
missing = numeric_responses.extra(where = ["value_float IS NULL"])
# Recalculate the numeric values where we can.
regex = re.compile(r"(-?\d+(\.\d+)?)")
for response in missing:
parts = regex.split(response.message.text)
if len(parts) == 4 :
response.eav.poll_number_value = float(parts[1])
else:
response.has_errors = True
# Make sure we don't insert implausible values.
invalid = getattr(settings, "INVALID_RESPONSE", lambda response: False)
response.has_errors = response.has_errors or invalid(response)
response.save(force_update=True)
|
<commit_before><commit_msg>Repair responses that should have a numeric value stored, but don't,
making sure we don't insert implausibly large values.<commit_after># -*- coding: utf-8 -*-
from south.v2 import DataMigration
from education.models import Response
import re
from django.conf import settings
from django.db.transaction import commit_on_success
@commit_on_success
class Migration(DataMigration):
def forwards(self, orm):
# Find responses that don't have a stored numeric value.
numeric_responses = Response.objects.filter(has_errors=False, message__direction='I', poll__type='n')
numeric_responses.query.join(('poll_response', 'eav_value', 'id', 'entity_id'), promote=True)
missing = numeric_responses.extra(where = ["value_float IS NULL"])
# Recalculate the numeric values where we can.
regex = re.compile(r"(-?\d+(\.\d+)?)")
for response in missing:
parts = regex.split(response.message.text)
if len(parts) == 4 :
response.eav.poll_number_value = float(parts[1])
else:
response.has_errors = True
# Make sure we don't insert implausible values.
invalid = getattr(settings, "INVALID_RESPONSE", lambda response: False)
response.has_errors = response.has_errors or invalid(response)
response.save(force_update=True)
|
|
ec46ad8bd57be797a6bed7ec519cf7962bb6e1da
|
test_eigenvalue_goedbloed.py
|
test_eigenvalue_goedbloed.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 16 10:29:40 2014
@author: Jens von der Linden
Tests based on relations in Goedbloed (2010) Principles of MHD.
All equation references are from the book unless ofterwise noted.
"""
import eigenvalue_goedbloed as eigen
from nose.tools import assert_almost_equal, assert_greater_equal
delta = 10E-5
test_equil()
def test_equil():
"""
Create equilibrium for testing.
Can start with simple constant density, axial current and field.
"""
def test_freq_ordering():
"""
Tests the frequency ordering given in equation (9.38).
"""
radii = []
for radius in radii:
test_omega_alfven = eigen.omega_alfven()
test_omega_sound = eigen.omega_sound()
test_omega_s0 = eigen.omega_s0()
test_omega_f0 = eigen.omega_f0()
assert_greater_equal(test_omega_s0, test_omega_sound)
assert_greater_equal(test_omega_alfven, test_omega_s0)
assert_greater_equal(test_omega_f0, test_omega_alfven)
def test_n():
"""
Tests different formulations for n equations (9.32) & (9.36).
"""
test_n_freq = eigen.n_freq()
test_n_fb = eigen.n_fb()
assert_almost_equal(test_n_freq, test_n_fb, delta=delta)
def test_d():
"""
Tests different formulations for d equations (9.30) & (9.36).
"""
test_d_freq = eigen.d_freq()
test_d_fb = eigen.d_fb()
assert_almost_equal(test_d_freq, test_d_fb, delta=delta)
def test_freq_limits():
"""
Tests frequency limis given below equation (9.38).
Notes
-----
May not be possible to implement numerically as these limits are for r->0,
where the functions blow up.
"""
pass
def test_newcomb_limit():
"""
Tests limit of omega_sq=0 where the eigenvalue equations should become
newcomb's equations.
Notes
-----
Have to think about if it will be possible to implement this limit
numerically.
"""
pass
|
Add template for testing goedbloed eigenvalue problem
|
Add template for testing goedbloed eigenvalue problem
|
Python
|
mit
|
jensv/fluxtubestability,jensv/fluxtubestability
|
Add template for testing goedbloed eigenvalue problem
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 16 10:29:40 2014
@author: Jens von der Linden
Tests based on relations in Goedbloed (2010) Principles of MHD.
All equation references are from the book unless ofterwise noted.
"""
import eigenvalue_goedbloed as eigen
from nose.tools import assert_almost_equal, assert_greater_equal
delta = 10E-5
test_equil()
def test_equil():
"""
Create equilibrium for testing.
Can start with simple constant density, axial current and field.
"""
def test_freq_ordering():
"""
Tests the frequency ordering given in equation (9.38).
"""
radii = []
for radius in radii:
test_omega_alfven = eigen.omega_alfven()
test_omega_sound = eigen.omega_sound()
test_omega_s0 = eigen.omega_s0()
test_omega_f0 = eigen.omega_f0()
assert_greater_equal(test_omega_s0, test_omega_sound)
assert_greater_equal(test_omega_alfven, test_omega_s0)
assert_greater_equal(test_omega_f0, test_omega_alfven)
def test_n():
"""
Tests different formulations for n equations (9.32) & (9.36).
"""
test_n_freq = eigen.n_freq()
test_n_fb = eigen.n_fb()
assert_almost_equal(test_n_freq, test_n_fb, delta=delta)
def test_d():
"""
Tests different formulations for d equations (9.30) & (9.36).
"""
test_d_freq = eigen.d_freq()
test_d_fb = eigen.d_fb()
assert_almost_equal(test_d_freq, test_d_fb, delta=delta)
def test_freq_limits():
"""
Tests frequency limis given below equation (9.38).
Notes
-----
May not be possible to implement numerically as these limits are for r->0,
where the functions blow up.
"""
pass
def test_newcomb_limit():
"""
Tests limit of omega_sq=0 where the eigenvalue equations should become
newcomb's equations.
Notes
-----
Have to think about if it will be possible to implement this limit
numerically.
"""
pass
|
<commit_before><commit_msg>Add template for testing goedbloed eigenvalue problem<commit_after>
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 16 10:29:40 2014
@author: Jens von der Linden
Tests based on relations in Goedbloed (2010) Principles of MHD.
All equation references are from the book unless ofterwise noted.
"""
import eigenvalue_goedbloed as eigen
from nose.tools import assert_almost_equal, assert_greater_equal
delta = 10E-5
test_equil()
def test_equil():
"""
Create equilibrium for testing.
Can start with simple constant density, axial current and field.
"""
def test_freq_ordering():
"""
Tests the frequency ordering given in equation (9.38).
"""
radii = []
for radius in radii:
test_omega_alfven = eigen.omega_alfven()
test_omega_sound = eigen.omega_sound()
test_omega_s0 = eigen.omega_s0()
test_omega_f0 = eigen.omega_f0()
assert_greater_equal(test_omega_s0, test_omega_sound)
assert_greater_equal(test_omega_alfven, test_omega_s0)
assert_greater_equal(test_omega_f0, test_omega_alfven)
def test_n():
"""
Tests different formulations for n equations (9.32) & (9.36).
"""
test_n_freq = eigen.n_freq()
test_n_fb = eigen.n_fb()
assert_almost_equal(test_n_freq, test_n_fb, delta=delta)
def test_d():
"""
Tests different formulations for d equations (9.30) & (9.36).
"""
test_d_freq = eigen.d_freq()
test_d_fb = eigen.d_fb()
assert_almost_equal(test_d_freq, test_d_fb, delta=delta)
def test_freq_limits():
"""
Tests frequency limis given below equation (9.38).
Notes
-----
May not be possible to implement numerically as these limits are for r->0,
where the functions blow up.
"""
pass
def test_newcomb_limit():
"""
Tests limit of omega_sq=0 where the eigenvalue equations should become
newcomb's equations.
Notes
-----
Have to think about if it will be possible to implement this limit
numerically.
"""
pass
|
Add template for testing goedbloed eigenvalue problem# -*- coding: utf-8 -*-
"""
Created on Wed Jul 16 10:29:40 2014
@author: Jens von der Linden
Tests based on relations in Goedbloed (2010) Principles of MHD.
All equation references are from the book unless ofterwise noted.
"""
import eigenvalue_goedbloed as eigen
from nose.tools import assert_almost_equal, assert_greater_equal
delta = 10E-5
test_equil()
def test_equil():
"""
Create equilibrium for testing.
Can start with simple constant density, axial current and field.
"""
def test_freq_ordering():
"""
Tests the frequency ordering given in equation (9.38).
"""
radii = []
for radius in radii:
test_omega_alfven = eigen.omega_alfven()
test_omega_sound = eigen.omega_sound()
test_omega_s0 = eigen.omega_s0()
test_omega_f0 = eigen.omega_f0()
assert_greater_equal(test_omega_s0, test_omega_sound)
assert_greater_equal(test_omega_alfven, test_omega_s0)
assert_greater_equal(test_omega_f0, test_omega_alfven)
def test_n():
"""
Tests different formulations for n equations (9.32) & (9.36).
"""
test_n_freq = eigen.n_freq()
test_n_fb = eigen.n_fb()
assert_almost_equal(test_n_freq, test_n_fb, delta=delta)
def test_d():
"""
Tests different formulations for d equations (9.30) & (9.36).
"""
test_d_freq = eigen.d_freq()
test_d_fb = eigen.d_fb()
assert_almost_equal(test_d_freq, test_d_fb, delta=delta)
def test_freq_limits():
"""
Tests frequency limis given below equation (9.38).
Notes
-----
May not be possible to implement numerically as these limits are for r->0,
where the functions blow up.
"""
pass
def test_newcomb_limit():
"""
Tests limit of omega_sq=0 where the eigenvalue equations should become
newcomb's equations.
Notes
-----
Have to think about if it will be possible to implement this limit
numerically.
"""
pass
|
<commit_before><commit_msg>Add template for testing goedbloed eigenvalue problem<commit_after># -*- coding: utf-8 -*-
"""
Created on Wed Jul 16 10:29:40 2014
@author: Jens von der Linden
Tests based on relations in Goedbloed (2010) Principles of MHD.
All equation references are from the book unless ofterwise noted.
"""
import eigenvalue_goedbloed as eigen
from nose.tools import assert_almost_equal, assert_greater_equal
delta = 10E-5
test_equil()
def test_equil():
"""
Create equilibrium for testing.
Can start with simple constant density, axial current and field.
"""
def test_freq_ordering():
"""
Tests the frequency ordering given in equation (9.38).
"""
radii = []
for radius in radii:
test_omega_alfven = eigen.omega_alfven()
test_omega_sound = eigen.omega_sound()
test_omega_s0 = eigen.omega_s0()
test_omega_f0 = eigen.omega_f0()
assert_greater_equal(test_omega_s0, test_omega_sound)
assert_greater_equal(test_omega_alfven, test_omega_s0)
assert_greater_equal(test_omega_f0, test_omega_alfven)
def test_n():
"""
Tests different formulations for n equations (9.32) & (9.36).
"""
test_n_freq = eigen.n_freq()
test_n_fb = eigen.n_fb()
assert_almost_equal(test_n_freq, test_n_fb, delta=delta)
def test_d():
"""
Tests different formulations for d equations (9.30) & (9.36).
"""
test_d_freq = eigen.d_freq()
test_d_fb = eigen.d_fb()
assert_almost_equal(test_d_freq, test_d_fb, delta=delta)
def test_freq_limits():
"""
Tests frequency limis given below equation (9.38).
Notes
-----
May not be possible to implement numerically as these limits are for r->0,
where the functions blow up.
"""
pass
def test_newcomb_limit():
"""
Tests limit of omega_sq=0 where the eigenvalue equations should become
newcomb's equations.
Notes
-----
Have to think about if it will be possible to implement this limit
numerically.
"""
pass
|
|
5fdc462be0381a5273008a7a7b702a861bffc1a4
|
ceilometer/storage/sqlalchemy/migrate_repo/versions/011_indexes_cleanup.py
|
ceilometer/storage/sqlalchemy/migrate_repo/versions/011_indexes_cleanup.py
|
# -*- encoding: utf-8 -*-
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Index, MetaData, Table
INDEXES = {
#`table_name`: ((`index_name`, `column`),)
"user": (('ix_user_id', 'id'),),
"source": (('ix_source_id', 'id'),),
"project": (('ix_project_id', 'id'),),
"meter": (('ix_meter_id', 'id'),),
"alarm": (('ix_alarm_id', 'id'),),
"resource": (('ix_resource_id', 'id'),)
}
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
load_tables = dict((table_name, Table(table_name, meta, autoload=True))
for table_name in INDEXES.keys())
for table_name, indexes in INDEXES.items():
table = load_tables[table_name]
for index_name, column in indexes:
index = Index(index_name, table.c[column])
index.drop()
def downgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
load_tables = dict((table_name, Table(table_name, meta, autoload=True))
for table_name in INDEXES.keys())
for table_name, indexes in INDEXES.items():
table = load_tables[table_name]
for index_name, column in indexes:
index = Index(index_name, table.c[column])
index.create()
|
Add cleanup migration for indexes.
|
Add cleanup migration for indexes.
There are a lot of extra indexes in database (for primary key).
bp: ceilometer-db-sync-models-with-migrations
Change-Id: I44d569de81f078aef27451382baf0a3061f38be2
|
Python
|
apache-2.0
|
fabian4/ceilometer,cernops/ceilometer,sileht/aodh,redhat-openstack/ceilometer,Juniper/ceilometer,JioCloud/ceilometer,rickerc/ceilometer_audit,m1093782566/openstack_org_ceilometer,rackerlabs/instrumented-ceilometer,ityaptin/ceilometer,eayunstack/ceilometer,r-mibu/ceilometer,fabian4/ceilometer,eayunstack/ceilometer,mathslinux/ceilometer,NeCTAR-RC/ceilometer,luogangyi/Ceilometer-oVirt,maestro-hybrid-cloud/ceilometer,openstack/aodh,r-mibu/ceilometer,rickerc/ceilometer_audit,citrix-openstack-build/ceilometer,isyippee/ceilometer,citrix-openstack-build/ceilometer,rackerlabs/instrumented-ceilometer,NeCTAR-RC/ceilometer,isyippee/ceilometer,JioCloud/ceilometer,pkilambi/ceilometer,NeCTAR-RC/ceilometer,pkilambi/ceilometer,rickerc/ceilometer_audit,froyobin/ceilometer,MisterPup/Ceilometer-Juno-Extension,MisterPup/Ceilometer-Juno-Extension,sileht/aodh,tanglei528/ceilometer,luogangyi/Ceilometer-oVirt,openstack/aodh,tanglei528/ceilometer,froyobin/ceilometer,maestro-hybrid-cloud/ceilometer,Juniper/ceilometer,chungg/aodh,citrix-openstack-build/ceilometer,openstack/ceilometer,pczerkas/aodh,idegtiarov/ceilometer,m1093782566/openstack_org_ceilometer,JioCloud/ceilometer,rackerlabs/instrumented-ceilometer,idegtiarov/ceilometer,pczerkas/aodh,tanglei528/ceilometer,openstack/ceilometer,ityaptin/ceilometer,chungg/aodh,cernops/ceilometer,redhat-openstack/ceilometer,mathslinux/ceilometer
|
Add cleanup migration for indexes.
There are a lot of extra indexes in database (for primary key).
bp: ceilometer-db-sync-models-with-migrations
Change-Id: I44d569de81f078aef27451382baf0a3061f38be2
|
# -*- encoding: utf-8 -*-
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Index, MetaData, Table
INDEXES = {
#`table_name`: ((`index_name`, `column`),)
"user": (('ix_user_id', 'id'),),
"source": (('ix_source_id', 'id'),),
"project": (('ix_project_id', 'id'),),
"meter": (('ix_meter_id', 'id'),),
"alarm": (('ix_alarm_id', 'id'),),
"resource": (('ix_resource_id', 'id'),)
}
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
load_tables = dict((table_name, Table(table_name, meta, autoload=True))
for table_name in INDEXES.keys())
for table_name, indexes in INDEXES.items():
table = load_tables[table_name]
for index_name, column in indexes:
index = Index(index_name, table.c[column])
index.drop()
def downgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
load_tables = dict((table_name, Table(table_name, meta, autoload=True))
for table_name in INDEXES.keys())
for table_name, indexes in INDEXES.items():
table = load_tables[table_name]
for index_name, column in indexes:
index = Index(index_name, table.c[column])
index.create()
|
<commit_before><commit_msg>Add cleanup migration for indexes.
There are a lot of extra indexes in database (for primary key).
bp: ceilometer-db-sync-models-with-migrations
Change-Id: I44d569de81f078aef27451382baf0a3061f38be2<commit_after>
|
# -*- encoding: utf-8 -*-
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Index, MetaData, Table
INDEXES = {
#`table_name`: ((`index_name`, `column`),)
"user": (('ix_user_id', 'id'),),
"source": (('ix_source_id', 'id'),),
"project": (('ix_project_id', 'id'),),
"meter": (('ix_meter_id', 'id'),),
"alarm": (('ix_alarm_id', 'id'),),
"resource": (('ix_resource_id', 'id'),)
}
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
load_tables = dict((table_name, Table(table_name, meta, autoload=True))
for table_name in INDEXES.keys())
for table_name, indexes in INDEXES.items():
table = load_tables[table_name]
for index_name, column in indexes:
index = Index(index_name, table.c[column])
index.drop()
def downgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
load_tables = dict((table_name, Table(table_name, meta, autoload=True))
for table_name in INDEXES.keys())
for table_name, indexes in INDEXES.items():
table = load_tables[table_name]
for index_name, column in indexes:
index = Index(index_name, table.c[column])
index.create()
|
Add cleanup migration for indexes.
There are a lot of extra indexes in database (for primary key).
bp: ceilometer-db-sync-models-with-migrations
Change-Id: I44d569de81f078aef27451382baf0a3061f38be2# -*- encoding: utf-8 -*-
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Index, MetaData, Table
INDEXES = {
#`table_name`: ((`index_name`, `column`),)
"user": (('ix_user_id', 'id'),),
"source": (('ix_source_id', 'id'),),
"project": (('ix_project_id', 'id'),),
"meter": (('ix_meter_id', 'id'),),
"alarm": (('ix_alarm_id', 'id'),),
"resource": (('ix_resource_id', 'id'),)
}
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
load_tables = dict((table_name, Table(table_name, meta, autoload=True))
for table_name in INDEXES.keys())
for table_name, indexes in INDEXES.items():
table = load_tables[table_name]
for index_name, column in indexes:
index = Index(index_name, table.c[column])
index.drop()
def downgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
load_tables = dict((table_name, Table(table_name, meta, autoload=True))
for table_name in INDEXES.keys())
for table_name, indexes in INDEXES.items():
table = load_tables[table_name]
for index_name, column in indexes:
index = Index(index_name, table.c[column])
index.create()
|
<commit_before><commit_msg>Add cleanup migration for indexes.
There are a lot of extra indexes in database (for primary key).
bp: ceilometer-db-sync-models-with-migrations
Change-Id: I44d569de81f078aef27451382baf0a3061f38be2<commit_after># -*- encoding: utf-8 -*-
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Index, MetaData, Table
INDEXES = {
#`table_name`: ((`index_name`, `column`),)
"user": (('ix_user_id', 'id'),),
"source": (('ix_source_id', 'id'),),
"project": (('ix_project_id', 'id'),),
"meter": (('ix_meter_id', 'id'),),
"alarm": (('ix_alarm_id', 'id'),),
"resource": (('ix_resource_id', 'id'),)
}
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
load_tables = dict((table_name, Table(table_name, meta, autoload=True))
for table_name in INDEXES.keys())
for table_name, indexes in INDEXES.items():
table = load_tables[table_name]
for index_name, column in indexes:
index = Index(index_name, table.c[column])
index.drop()
def downgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
load_tables = dict((table_name, Table(table_name, meta, autoload=True))
for table_name in INDEXES.keys())
for table_name, indexes in INDEXES.items():
table = load_tables[table_name]
for index_name, column in indexes:
index = Index(index_name, table.c[column])
index.create()
|
|
9a467e294269ed3e222d136f3ca59ef8a6ab96a5
|
turbustat/tests/test_mahalanobis.py
|
turbustat/tests/test_mahalanobis.py
|
# Licensed under an MIT open source license - see LICENSE
from __future__ import print_function, absolute_import, division
import pytest
import warnings
from ..statistics import Mahalanobis, Mahalanobis_Distance
from ..statistics.stats_warnings import TurbuStatTestingWarning
from ._testing_data import dataset1
def test_Mahalanobis_raisewarning():
'''
Mahalanobis has not been completed yet. Ensure the warning is returned
when used.
'''
with warnings.catch_warnings(record=True) as w:
mahala = Mahalanobis(dataset1['cube'])
assert len(w) == 1
assert w[0].category == TurbuStatTestingWarning
assert str(w[0].message) == \
("Mahalanobis is an untested statistic. Its use"
" is not yet recommended.")
def test_Mahalanobis_Distance_raisewarning():
'''
Mahalanobis has not been completed yet. Ensure the warning is returned
when used.
'''
with warnings.catch_warnings(record=True) as w:
mahala = Mahalanobis_Distance(dataset1['cube'], dataset1['cube'])
# Warning is raised each time Mahalanobis is run (so twice)
assert len(w) == 3
assert w[0].category == TurbuStatTestingWarning
assert str(w[0].message) == \
("Mahalanobis_Distance is an untested metric. Its use"
" is not yet recommended.")
|
Add mahalanobis tests to check that the warning is raised
|
Add mahalanobis tests to check that the warning is raised
|
Python
|
mit
|
e-koch/TurbuStat,Astroua/TurbuStat
|
Add mahalanobis tests to check that the warning is raised
|
# Licensed under an MIT open source license - see LICENSE
from __future__ import print_function, absolute_import, division
import pytest
import warnings
from ..statistics import Mahalanobis, Mahalanobis_Distance
from ..statistics.stats_warnings import TurbuStatTestingWarning
from ._testing_data import dataset1
def test_Mahalanobis_raisewarning():
'''
Mahalanobis has not been completed yet. Ensure the warning is returned
when used.
'''
with warnings.catch_warnings(record=True) as w:
mahala = Mahalanobis(dataset1['cube'])
assert len(w) == 1
assert w[0].category == TurbuStatTestingWarning
assert str(w[0].message) == \
("Mahalanobis is an untested statistic. Its use"
" is not yet recommended.")
def test_Mahalanobis_Distance_raisewarning():
'''
Mahalanobis has not been completed yet. Ensure the warning is returned
when used.
'''
with warnings.catch_warnings(record=True) as w:
mahala = Mahalanobis_Distance(dataset1['cube'], dataset1['cube'])
# Warning is raised each time Mahalanobis is run (so twice)
assert len(w) == 3
assert w[0].category == TurbuStatTestingWarning
assert str(w[0].message) == \
("Mahalanobis_Distance is an untested metric. Its use"
" is not yet recommended.")
|
<commit_before><commit_msg>Add mahalanobis tests to check that the warning is raised<commit_after>
|
# Licensed under an MIT open source license - see LICENSE
from __future__ import print_function, absolute_import, division
import pytest
import warnings
from ..statistics import Mahalanobis, Mahalanobis_Distance
from ..statistics.stats_warnings import TurbuStatTestingWarning
from ._testing_data import dataset1
def test_Mahalanobis_raisewarning():
'''
Mahalanobis has not been completed yet. Ensure the warning is returned
when used.
'''
with warnings.catch_warnings(record=True) as w:
mahala = Mahalanobis(dataset1['cube'])
assert len(w) == 1
assert w[0].category == TurbuStatTestingWarning
assert str(w[0].message) == \
("Mahalanobis is an untested statistic. Its use"
" is not yet recommended.")
def test_Mahalanobis_Distance_raisewarning():
'''
Mahalanobis has not been completed yet. Ensure the warning is returned
when used.
'''
with warnings.catch_warnings(record=True) as w:
mahala = Mahalanobis_Distance(dataset1['cube'], dataset1['cube'])
# Warning is raised each time Mahalanobis is run (so twice)
assert len(w) == 3
assert w[0].category == TurbuStatTestingWarning
assert str(w[0].message) == \
("Mahalanobis_Distance is an untested metric. Its use"
" is not yet recommended.")
|
Add mahalanobis tests to check that the warning is raised# Licensed under an MIT open source license - see LICENSE
from __future__ import print_function, absolute_import, division
import pytest
import warnings
from ..statistics import Mahalanobis, Mahalanobis_Distance
from ..statistics.stats_warnings import TurbuStatTestingWarning
from ._testing_data import dataset1
def test_Mahalanobis_raisewarning():
'''
Mahalanobis has not been completed yet. Ensure the warning is returned
when used.
'''
with warnings.catch_warnings(record=True) as w:
mahala = Mahalanobis(dataset1['cube'])
assert len(w) == 1
assert w[0].category == TurbuStatTestingWarning
assert str(w[0].message) == \
("Mahalanobis is an untested statistic. Its use"
" is not yet recommended.")
def test_Mahalanobis_Distance_raisewarning():
'''
Mahalanobis has not been completed yet. Ensure the warning is returned
when used.
'''
with warnings.catch_warnings(record=True) as w:
mahala = Mahalanobis_Distance(dataset1['cube'], dataset1['cube'])
# Warning is raised each time Mahalanobis is run (so twice)
assert len(w) == 3
assert w[0].category == TurbuStatTestingWarning
assert str(w[0].message) == \
("Mahalanobis_Distance is an untested metric. Its use"
" is not yet recommended.")
|
<commit_before><commit_msg>Add mahalanobis tests to check that the warning is raised<commit_after># Licensed under an MIT open source license - see LICENSE
from __future__ import print_function, absolute_import, division
import pytest
import warnings
from ..statistics import Mahalanobis, Mahalanobis_Distance
from ..statistics.stats_warnings import TurbuStatTestingWarning
from ._testing_data import dataset1
def test_Mahalanobis_raisewarning():
'''
Mahalanobis has not been completed yet. Ensure the warning is returned
when used.
'''
with warnings.catch_warnings(record=True) as w:
mahala = Mahalanobis(dataset1['cube'])
assert len(w) == 1
assert w[0].category == TurbuStatTestingWarning
assert str(w[0].message) == \
("Mahalanobis is an untested statistic. Its use"
" is not yet recommended.")
def test_Mahalanobis_Distance_raisewarning():
'''
Mahalanobis has not been completed yet. Ensure the warning is returned
when used.
'''
with warnings.catch_warnings(record=True) as w:
mahala = Mahalanobis_Distance(dataset1['cube'], dataset1['cube'])
        # The Mahalanobis warning is raised for each cube (so twice), plus the Mahalanobis_Distance warning itself
assert len(w) == 3
assert w[0].category == TurbuStatTestingWarning
assert str(w[0].message) == \
("Mahalanobis_Distance is an untested metric. Its use"
" is not yet recommended.")
|
|
45e22595708e9053b27197c8d7a1d4d09c6a4861
|
notifications/migrations/0004_auto_20150826_1508.py
|
notifications/migrations/0004_auto_20150826_1508.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import notifications.models
class Migration(migrations.Migration):
dependencies = [
('notifications', '0003_notification_data'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='timestamp',
field=models.DateTimeField(default=notifications.models.now),
),
]
|
Add missing migration for Notification model
|
Add missing migration for Notification model
|
Python
|
bsd-3-clause
|
philroche/django-notifications,philroche/django-notifications,philroche/django-notifications
|
Add missing migration for Notification model
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import notifications.models
class Migration(migrations.Migration):
dependencies = [
('notifications', '0003_notification_data'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='timestamp',
field=models.DateTimeField(default=notifications.models.now),
),
]
|
<commit_before><commit_msg>Add missing migration for Notification model<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import notifications.models
class Migration(migrations.Migration):
dependencies = [
('notifications', '0003_notification_data'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='timestamp',
field=models.DateTimeField(default=notifications.models.now),
),
]
|
Add missing migration for Notification model# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import notifications.models
class Migration(migrations.Migration):
dependencies = [
('notifications', '0003_notification_data'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='timestamp',
field=models.DateTimeField(default=notifications.models.now),
),
]
|
<commit_before><commit_msg>Add missing migration for Notification model<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import notifications.models
class Migration(migrations.Migration):
dependencies = [
('notifications', '0003_notification_data'),
]
operations = [
migrations.AlterField(
model_name='notification',
name='timestamp',
field=models.DateTimeField(default=notifications.models.now),
),
]
|
|
f317ec8026e10fd53ea0c19d2347836d5398f8e4
|
begood_sites/management/commands/get_site.py
|
begood_sites/management/commands/get_site.py
|
# coding=utf-8
from django.db import connection
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.contrib.sites.models import Site
class Command(BaseCommand):
args = '<id_or_name>'
help = 'Return the id, domain and name for a site'
def handle(self, *args, **options):
id_or_name = args[0]
try:
site = Site.objects.get(id=int(id_or_name))
except (Site.DoesNotExist, ValueError):
try:
site = Site.objects.get(name=id_or_name)
except Site.DoesNotExist:
raise CommandError('No such site: %s' % id_or_name)
print "%d %s %s" % (site.id, site.domain, site.name)
|
Add a management command for getting site information.
|
Add a management command for getting site information.
|
Python
|
mit
|
AGoodId/begood-sites
|
Add a management command for getting site information.
|
# coding=utf-8
from django.db import connection
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.contrib.sites.models import Site
class Command(BaseCommand):
args = '<id_or_name>'
help = 'Return the id, domain and name for a site'
def handle(self, *args, **options):
id_or_name = args[0]
try:
site = Site.objects.get(id=int(id_or_name))
except (Site.DoesNotExist, ValueError):
try:
site = Site.objects.get(name=id_or_name)
except Site.DoesNotExist:
raise CommandError('No such site: %s' % id_or_name)
print "%d %s %s" % (site.id, site.domain, site.name)
|
<commit_before><commit_msg>Add a management command for getting site information.<commit_after>
|
# coding=utf-8
from django.db import connection
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.contrib.sites.models import Site
class Command(BaseCommand):
args = '<id_or_name>'
help = 'Return the id, domain and name for a site'
def handle(self, *args, **options):
id_or_name = args[0]
try:
site = Site.objects.get(id=int(id_or_name))
except (Site.DoesNotExist, ValueError):
try:
site = Site.objects.get(name=id_or_name)
except Site.DoesNotExist:
raise CommandError('No such site: %s' % id_or_name)
print "%d %s %s" % (site.id, site.domain, site.name)
|
Add a management command for getting site information.# coding=utf-8
from django.db import connection
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.contrib.sites.models import Site
class Command(BaseCommand):
args = '<id_or_name>'
help = 'Return the id, domain and name for a site'
def handle(self, *args, **options):
id_or_name = args[0]
try:
site = Site.objects.get(id=int(id_or_name))
except (Site.DoesNotExist, ValueError):
try:
site = Site.objects.get(name=id_or_name)
except Site.DoesNotExist:
raise CommandError('No such site: %s' % id_or_name)
print "%d %s %s" % (site.id, site.domain, site.name)
|
<commit_before><commit_msg>Add a management command for getting site information.<commit_after># coding=utf-8
from django.db import connection
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.contrib.sites.models import Site
class Command(BaseCommand):
args = '<id_or_name>'
help = 'Return the id, domain and name for a site'
def handle(self, *args, **options):
id_or_name = args[0]
try:
site = Site.objects.get(id=int(id_or_name))
except (Site.DoesNotExist, ValueError):
try:
site = Site.objects.get(name=id_or_name)
except Site.DoesNotExist:
raise CommandError('No such site: %s' % id_or_name)
print "%d %s %s" % (site.id, site.domain, site.name)
|
|
2e44dc3c8e3e8751e649728ff5abf541c401a3e7
|
wagtail/wagtailsearch/tests/test_page_search.py
|
wagtail/wagtailsearch/tests/test_page_search.py
|
from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.test import TestCase
from wagtail.wagtailcore.models import Page
from wagtail.wagtailsearch.backends import get_search_backend
class PageSearchTests(object):
# A TestCase with this class mixed in will be dynamically created
# for each search backend defined in WAGTAILSEARCH_BACKENDS, with the backend name available
# as self.backend_name
fixtures = ['test.json']
def setUp(self):
self.backend = get_search_backend(self.backend_name)
self.reset_index()
for page in Page.objects.all():
self.backend.add(page)
self.refresh_index()
def reset_index(self):
if self.backend.rebuilder_class:
index = self.backend.get_index_for_model(Page)
rebuilder = self.backend.rebuilder_class(index)
index = rebuilder.start()
index.add_model(Page)
rebuilder.finish()
def refresh_index(self):
index = self.backend.get_index_for_model(Page)
if index:
index.refresh()
def test_search_specific_queryset(self):
list(Page.objects.specific().search('bread', backend=self.backend_name))
def test_search_specific_queryset_with_fields(self):
list(Page.objects.specific().search('bread', fields=['title'], backend=self.backend_name))
for backend_name in settings.WAGTAILSEARCH_BACKENDS.keys():
test_name = str("Test%sBackend" % backend_name.title())
globals()[test_name] = type(test_name, (PageSearchTests, TestCase,), {'backend_name': backend_name})
|
Add tests for searching on the Page model
|
Add tests for searching on the Page model
|
Python
|
bsd-3-clause
|
FlipperPA/wagtail,thenewguy/wagtail,takeflight/wagtail,wagtail/wagtail,thenewguy/wagtail,torchbox/wagtail,jnns/wagtail,mikedingjan/wagtail,jnns/wagtail,nealtodd/wagtail,FlipperPA/wagtail,takeflight/wagtail,FlipperPA/wagtail,rsalmaso/wagtail,rsalmaso/wagtail,takeflight/wagtail,timorieber/wagtail,nimasmi/wagtail,kaedroho/wagtail,timorieber/wagtail,mixxorz/wagtail,rsalmaso/wagtail,mikedingjan/wagtail,torchbox/wagtail,mixxorz/wagtail,gasman/wagtail,thenewguy/wagtail,wagtail/wagtail,nealtodd/wagtail,thenewguy/wagtail,mixxorz/wagtail,kaedroho/wagtail,rsalmaso/wagtail,mikedingjan/wagtail,thenewguy/wagtail,wagtail/wagtail,gasman/wagtail,rsalmaso/wagtail,torchbox/wagtail,mixxorz/wagtail,timorieber/wagtail,zerolab/wagtail,nimasmi/wagtail,mikedingjan/wagtail,mixxorz/wagtail,timorieber/wagtail,wagtail/wagtail,nimasmi/wagtail,gasman/wagtail,zerolab/wagtail,gasman/wagtail,kaedroho/wagtail,zerolab/wagtail,jnns/wagtail,nealtodd/wagtail,torchbox/wagtail,kaedroho/wagtail,zerolab/wagtail,nealtodd/wagtail,zerolab/wagtail,gasman/wagtail,takeflight/wagtail,nimasmi/wagtail,FlipperPA/wagtail,jnns/wagtail,wagtail/wagtail,kaedroho/wagtail
|
Add tests for searching on the Page model
|
from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.test import TestCase
from wagtail.wagtailcore.models import Page
from wagtail.wagtailsearch.backends import get_search_backend
class PageSearchTests(object):
# A TestCase with this class mixed in will be dynamically created
# for each search backend defined in WAGTAILSEARCH_BACKENDS, with the backend name available
# as self.backend_name
fixtures = ['test.json']
def setUp(self):
self.backend = get_search_backend(self.backend_name)
self.reset_index()
for page in Page.objects.all():
self.backend.add(page)
self.refresh_index()
def reset_index(self):
if self.backend.rebuilder_class:
index = self.backend.get_index_for_model(Page)
rebuilder = self.backend.rebuilder_class(index)
index = rebuilder.start()
index.add_model(Page)
rebuilder.finish()
def refresh_index(self):
index = self.backend.get_index_for_model(Page)
if index:
index.refresh()
def test_search_specific_queryset(self):
list(Page.objects.specific().search('bread', backend=self.backend_name))
def test_search_specific_queryset_with_fields(self):
list(Page.objects.specific().search('bread', fields=['title'], backend=self.backend_name))
for backend_name in settings.WAGTAILSEARCH_BACKENDS.keys():
test_name = str("Test%sBackend" % backend_name.title())
globals()[test_name] = type(test_name, (PageSearchTests, TestCase,), {'backend_name': backend_name})
|
<commit_before><commit_msg>Add tests for searching on the Page model<commit_after>
|
from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.test import TestCase
from wagtail.wagtailcore.models import Page
from wagtail.wagtailsearch.backends import get_search_backend
class PageSearchTests(object):
# A TestCase with this class mixed in will be dynamically created
# for each search backend defined in WAGTAILSEARCH_BACKENDS, with the backend name available
# as self.backend_name
fixtures = ['test.json']
def setUp(self):
self.backend = get_search_backend(self.backend_name)
self.reset_index()
for page in Page.objects.all():
self.backend.add(page)
self.refresh_index()
def reset_index(self):
if self.backend.rebuilder_class:
index = self.backend.get_index_for_model(Page)
rebuilder = self.backend.rebuilder_class(index)
index = rebuilder.start()
index.add_model(Page)
rebuilder.finish()
def refresh_index(self):
index = self.backend.get_index_for_model(Page)
if index:
index.refresh()
def test_search_specific_queryset(self):
list(Page.objects.specific().search('bread', backend=self.backend_name))
def test_search_specific_queryset_with_fields(self):
list(Page.objects.specific().search('bread', fields=['title'], backend=self.backend_name))
for backend_name in settings.WAGTAILSEARCH_BACKENDS.keys():
test_name = str("Test%sBackend" % backend_name.title())
globals()[test_name] = type(test_name, (PageSearchTests, TestCase,), {'backend_name': backend_name})
|
Add tests for searching on the Page modelfrom __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.test import TestCase
from wagtail.wagtailcore.models import Page
from wagtail.wagtailsearch.backends import get_search_backend
class PageSearchTests(object):
# A TestCase with this class mixed in will be dynamically created
# for each search backend defined in WAGTAILSEARCH_BACKENDS, with the backend name available
# as self.backend_name
fixtures = ['test.json']
def setUp(self):
self.backend = get_search_backend(self.backend_name)
self.reset_index()
for page in Page.objects.all():
self.backend.add(page)
self.refresh_index()
def reset_index(self):
if self.backend.rebuilder_class:
index = self.backend.get_index_for_model(Page)
rebuilder = self.backend.rebuilder_class(index)
index = rebuilder.start()
index.add_model(Page)
rebuilder.finish()
def refresh_index(self):
index = self.backend.get_index_for_model(Page)
if index:
index.refresh()
def test_search_specific_queryset(self):
list(Page.objects.specific().search('bread', backend=self.backend_name))
def test_search_specific_queryset_with_fields(self):
list(Page.objects.specific().search('bread', fields=['title'], backend=self.backend_name))
for backend_name in settings.WAGTAILSEARCH_BACKENDS.keys():
test_name = str("Test%sBackend" % backend_name.title())
globals()[test_name] = type(test_name, (PageSearchTests, TestCase,), {'backend_name': backend_name})
|
<commit_before><commit_msg>Add tests for searching on the Page model<commit_after>from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.test import TestCase
from wagtail.wagtailcore.models import Page
from wagtail.wagtailsearch.backends import get_search_backend
class PageSearchTests(object):
# A TestCase with this class mixed in will be dynamically created
# for each search backend defined in WAGTAILSEARCH_BACKENDS, with the backend name available
# as self.backend_name
fixtures = ['test.json']
def setUp(self):
self.backend = get_search_backend(self.backend_name)
self.reset_index()
for page in Page.objects.all():
self.backend.add(page)
self.refresh_index()
def reset_index(self):
if self.backend.rebuilder_class:
index = self.backend.get_index_for_model(Page)
rebuilder = self.backend.rebuilder_class(index)
index = rebuilder.start()
index.add_model(Page)
rebuilder.finish()
def refresh_index(self):
index = self.backend.get_index_for_model(Page)
if index:
index.refresh()
def test_search_specific_queryset(self):
list(Page.objects.specific().search('bread', backend=self.backend_name))
def test_search_specific_queryset_with_fields(self):
list(Page.objects.specific().search('bread', fields=['title'], backend=self.backend_name))
for backend_name in settings.WAGTAILSEARCH_BACKENDS.keys():
test_name = str("Test%sBackend" % backend_name.title())
globals()[test_name] = type(test_name, (PageSearchTests, TestCase,), {'backend_name': backend_name})
|
|
7376dcf2222f24bafee9896c7b6631cbede829c8
|
bayespy/inference/vmp/nodes/tests/test_dirichlet.py
|
bayespy/inference/vmp/nodes/tests/test_dirichlet.py
|
######################################################################
# Copyright (C) 2014 Jaakko Luttinen
#
# This file is licensed under Version 3.0 of the GNU General Public
# License. See LICENSE for a text of the license.
######################################################################
######################################################################
# This file is part of BayesPy.
#
# BayesPy is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# BayesPy is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BayesPy. If not, see <http://www.gnu.org/licenses/>.
######################################################################
"""
Unit tests for `dirichlet` module.
"""
import numpy as np
from scipy import special
from bayespy.nodes import Dirichlet
from bayespy.utils import utils
from bayespy.utils import random
from bayespy.utils.utils import TestCase
class TestDirichlet(TestCase):
"""
Unit tests for Dirichlet node
"""
def test_init(self):
"""
Test the creation of Dirichlet nodes.
"""
# Some simple initializations
p = Dirichlet([1.5, 4.2, 3.5])
# Check that plates are correct
p = Dirichlet([2, 3, 4], plates=(4,3))
self.assertEqual(p.plates,
(4,3))
p = Dirichlet(np.ones((4,3,5)))
self.assertEqual(p.plates,
(4,3))
# Parent not a vector
self.assertRaises(ValueError,
Dirichlet,
4)
# Parent vector has invalid values
self.assertRaises(ValueError,
Dirichlet,
[-2,3,1])
# Plates inconsistent
self.assertRaises(ValueError,
Dirichlet,
np.ones((4,3)),
plates=(3,))
# Explicit plates too small
self.assertRaises(ValueError,
Dirichlet,
np.ones((4,3)),
plates=(1,))
pass
def test_moments(self):
"""
Test the moments of Dirichlet nodes.
"""
p = Dirichlet([2, 3, 4])
u = p._message_to_child()
self.assertAllClose(u[0],
special.psi([2,3,4]) - special.psi(2+3+4))
pass
def test_constant(self):
"""
Test the constant moments of Dirichlet nodes.
"""
p = Dirichlet([1, 1, 1])
p.initialize_from_value([0.5, 0.4, 0.1])
u = p._message_to_child()
self.assertAllClose(u[0],
np.log([0.5, 0.4, 0.1]))
pass
|
Add unit tests for Dirichlet node
|
TST: Add unit tests for Dirichlet node
|
Python
|
mit
|
jluttine/bayespy,bayespy/bayespy,SalemAmeen/bayespy,fivejjs/bayespy
|
TST: Add unit tests for Dirichlet node
|
######################################################################
# Copyright (C) 2014 Jaakko Luttinen
#
# This file is licensed under Version 3.0 of the GNU General Public
# License. See LICENSE for a text of the license.
######################################################################
######################################################################
# This file is part of BayesPy.
#
# BayesPy is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# BayesPy is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BayesPy. If not, see <http://www.gnu.org/licenses/>.
######################################################################
"""
Unit tests for `dirichlet` module.
"""
import numpy as np
from scipy import special
from bayespy.nodes import Dirichlet
from bayespy.utils import utils
from bayespy.utils import random
from bayespy.utils.utils import TestCase
class TestDirichlet(TestCase):
"""
Unit tests for Dirichlet node
"""
def test_init(self):
"""
Test the creation of Dirichlet nodes.
"""
# Some simple initializations
p = Dirichlet([1.5, 4.2, 3.5])
# Check that plates are correct
p = Dirichlet([2, 3, 4], plates=(4,3))
self.assertEqual(p.plates,
(4,3))
p = Dirichlet(np.ones((4,3,5)))
self.assertEqual(p.plates,
(4,3))
# Parent not a vector
self.assertRaises(ValueError,
Dirichlet,
4)
# Parent vector has invalid values
self.assertRaises(ValueError,
Dirichlet,
[-2,3,1])
# Plates inconsistent
self.assertRaises(ValueError,
Dirichlet,
np.ones((4,3)),
plates=(3,))
# Explicit plates too small
self.assertRaises(ValueError,
Dirichlet,
np.ones((4,3)),
plates=(1,))
pass
def test_moments(self):
"""
Test the moments of Dirichlet nodes.
"""
p = Dirichlet([2, 3, 4])
u = p._message_to_child()
self.assertAllClose(u[0],
special.psi([2,3,4]) - special.psi(2+3+4))
pass
def test_constant(self):
"""
Test the constant moments of Dirichlet nodes.
"""
p = Dirichlet([1, 1, 1])
p.initialize_from_value([0.5, 0.4, 0.1])
u = p._message_to_child()
self.assertAllClose(u[0],
np.log([0.5, 0.4, 0.1]))
pass
|
<commit_before><commit_msg>TST: Add unit tests for Dirichlet node<commit_after>
|
######################################################################
# Copyright (C) 2014 Jaakko Luttinen
#
# This file is licensed under Version 3.0 of the GNU General Public
# License. See LICENSE for a text of the license.
######################################################################
######################################################################
# This file is part of BayesPy.
#
# BayesPy is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# BayesPy is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BayesPy. If not, see <http://www.gnu.org/licenses/>.
######################################################################
"""
Unit tests for `dirichlet` module.
"""
import numpy as np
from scipy import special
from bayespy.nodes import Dirichlet
from bayespy.utils import utils
from bayespy.utils import random
from bayespy.utils.utils import TestCase
class TestDirichlet(TestCase):
"""
Unit tests for Dirichlet node
"""
def test_init(self):
"""
Test the creation of Dirichlet nodes.
"""
# Some simple initializations
p = Dirichlet([1.5, 4.2, 3.5])
# Check that plates are correct
p = Dirichlet([2, 3, 4], plates=(4,3))
self.assertEqual(p.plates,
(4,3))
p = Dirichlet(np.ones((4,3,5)))
self.assertEqual(p.plates,
(4,3))
# Parent not a vector
self.assertRaises(ValueError,
Dirichlet,
4)
# Parent vector has invalid values
self.assertRaises(ValueError,
Dirichlet,
[-2,3,1])
# Plates inconsistent
self.assertRaises(ValueError,
Dirichlet,
np.ones((4,3)),
plates=(3,))
# Explicit plates too small
self.assertRaises(ValueError,
Dirichlet,
np.ones((4,3)),
plates=(1,))
pass
def test_moments(self):
"""
Test the moments of Dirichlet nodes.
"""
p = Dirichlet([2, 3, 4])
u = p._message_to_child()
self.assertAllClose(u[0],
special.psi([2,3,4]) - special.psi(2+3+4))
pass
def test_constant(self):
"""
Test the constant moments of Dirichlet nodes.
"""
p = Dirichlet([1, 1, 1])
p.initialize_from_value([0.5, 0.4, 0.1])
u = p._message_to_child()
self.assertAllClose(u[0],
np.log([0.5, 0.4, 0.1]))
pass
|
TST: Add unit tests for Dirichlet node######################################################################
# Copyright (C) 2014 Jaakko Luttinen
#
# This file is licensed under Version 3.0 of the GNU General Public
# License. See LICENSE for a text of the license.
######################################################################
######################################################################
# This file is part of BayesPy.
#
# BayesPy is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# BayesPy is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BayesPy. If not, see <http://www.gnu.org/licenses/>.
######################################################################
"""
Unit tests for `dirichlet` module.
"""
import numpy as np
from scipy import special
from bayespy.nodes import Dirichlet
from bayespy.utils import utils
from bayespy.utils import random
from bayespy.utils.utils import TestCase
class TestDirichlet(TestCase):
"""
Unit tests for Dirichlet node
"""
def test_init(self):
"""
Test the creation of Dirichlet nodes.
"""
# Some simple initializations
p = Dirichlet([1.5, 4.2, 3.5])
# Check that plates are correct
p = Dirichlet([2, 3, 4], plates=(4,3))
self.assertEqual(p.plates,
(4,3))
p = Dirichlet(np.ones((4,3,5)))
self.assertEqual(p.plates,
(4,3))
# Parent not a vector
self.assertRaises(ValueError,
Dirichlet,
4)
# Parent vector has invalid values
self.assertRaises(ValueError,
Dirichlet,
[-2,3,1])
# Plates inconsistent
self.assertRaises(ValueError,
Dirichlet,
np.ones((4,3)),
plates=(3,))
# Explicit plates too small
self.assertRaises(ValueError,
Dirichlet,
np.ones((4,3)),
plates=(1,))
pass
def test_moments(self):
"""
Test the moments of Dirichlet nodes.
"""
p = Dirichlet([2, 3, 4])
u = p._message_to_child()
self.assertAllClose(u[0],
special.psi([2,3,4]) - special.psi(2+3+4))
pass
def test_constant(self):
"""
Test the constant moments of Dirichlet nodes.
"""
p = Dirichlet([1, 1, 1])
p.initialize_from_value([0.5, 0.4, 0.1])
u = p._message_to_child()
self.assertAllClose(u[0],
np.log([0.5, 0.4, 0.1]))
pass
|
<commit_before><commit_msg>TST: Add unit tests for Dirichlet node<commit_after>######################################################################
# Copyright (C) 2014 Jaakko Luttinen
#
# This file is licensed under Version 3.0 of the GNU General Public
# License. See LICENSE for a text of the license.
######################################################################
######################################################################
# This file is part of BayesPy.
#
# BayesPy is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# BayesPy is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BayesPy. If not, see <http://www.gnu.org/licenses/>.
######################################################################
"""
Unit tests for `dirichlet` module.
"""
import numpy as np
from scipy import special
from bayespy.nodes import Dirichlet
from bayespy.utils import utils
from bayespy.utils import random
from bayespy.utils.utils import TestCase
class TestDirichlet(TestCase):
"""
Unit tests for Dirichlet node
"""
def test_init(self):
"""
Test the creation of Dirichlet nodes.
"""
# Some simple initializations
p = Dirichlet([1.5, 4.2, 3.5])
# Check that plates are correct
p = Dirichlet([2, 3, 4], plates=(4,3))
self.assertEqual(p.plates,
(4,3))
p = Dirichlet(np.ones((4,3,5)))
self.assertEqual(p.plates,
(4,3))
# Parent not a vector
self.assertRaises(ValueError,
Dirichlet,
4)
# Parent vector has invalid values
self.assertRaises(ValueError,
Dirichlet,
[-2,3,1])
# Plates inconsistent
self.assertRaises(ValueError,
Dirichlet,
np.ones((4,3)),
plates=(3,))
# Explicit plates too small
self.assertRaises(ValueError,
Dirichlet,
np.ones((4,3)),
plates=(1,))
pass
def test_moments(self):
"""
Test the moments of Dirichlet nodes.
"""
p = Dirichlet([2, 3, 4])
u = p._message_to_child()
self.assertAllClose(u[0],
special.psi([2,3,4]) - special.psi(2+3+4))
pass
def test_constant(self):
"""
Test the constant moments of Dirichlet nodes.
"""
p = Dirichlet([1, 1, 1])
p.initialize_from_value([0.5, 0.4, 0.1])
u = p._message_to_child()
self.assertAllClose(u[0],
np.log([0.5, 0.4, 0.1]))
pass
|
|
f1f1239a6c33277b492e88e7cb8ebf83b3bef074
|
python/example_code/iam/list_users_with_resource.py
|
python/example_code/iam/list_users_with_resource.py
|
# Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
# Create an IAM service resource
resource = boto3.resource('iam')
# Get an iterable of all users
users = resource.users.all()
# Print details for each user
for user in users:
print("User {} created on {}".format(
user.user_name,
user.create_date
))
|
Add example to list IAM users with service resource
|
Add example to list IAM users with service resource
|
Python
|
apache-2.0
|
awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples
|
Add example to list IAM users with service resource
|
# Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
# Create an IAM service resource
resource = boto3.resource('iam')
# Get an iterable of all users
users = resource.users.all()
# Print details for each user
for user in users:
print("User {} created on {}".format(
user.user_name,
user.create_date
))
|
<commit_before><commit_msg>Add example to list IAM users with service resource<commit_after>
|
# Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
# Create an IAM service resource
resource = boto3.resource('iam')
# Get an iterable of all users
users = resource.users.all()
# Print details for each user
for user in users:
print("User {} created on {}".format(
user.user_name,
user.create_date
))
|
Add example to list IAM users with service resource# Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
# Create an IAM service resource
resource = boto3.resource('iam')
# Get an iterable of all users
users = resource.users.all()
# Print details for each user
for user in users:
print("User {} created on {}".format(
user.user_name,
user.create_date
))
|
<commit_before><commit_msg>Add example to list IAM users with service resource<commit_after># Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
# Create an IAM service resource
resource = boto3.resource('iam')
# Get an iterable of all users
users = resource.users.all()
# Print details for each user
for user in users:
print("User {} created on {}".format(
user.user_name,
user.create_date
))
|
|
2bb4bc69e88431f4f89ad63243a1f3dc59ee2b29
|
pbs/create_db.py
|
pbs/create_db.py
|
#!/usr/bin/env python3
#==============================================================================
# author : Pavel Polishchuk
# date : 20-08-2018
# version :
# python_version :
# copyright : Pavel Polishchuk 2018
# license :
#==============================================================================
import argparse
import os
import re
from subprocess import Popen
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create fragment DB from sorted text files with context and fragments.')
parser.add_argument('-d', '--dir', metavar='DIR', required=True,
help='dir where input files are stored.')
args = vars(parser.parse_args())
for o, v in args.items():
if o == "dir": output_dir = os.path.abspath(v)
job_dir = os.path.join(output_dir, 'jobs')
if not os.path.exists(job_dir):
os.mkdir(job_dir)
pbs_name = os.path.join(job_dir, 'db_gen_%s.pbs' % (os.path.basename(output_dir)))
script = """
#!/usr/bin/env bash
#PBS -l select=1:ncpus=32
#PBS -k oe
cd %s
""" % output_dir
for fname in sorted(os.listdir(output_dir)):
if re.search('^r[1-5]_c\.txt$', fname):
script += 'python3 ~/python/crem/import_env_to_db.py -i %s -o replacements.db -t radius%s -c\n' % (fname, fname[1])
with open(pbs_name, "wt") as f:
f.write(script)
p = Popen(['qsub', pbs_name], encoding='utf8')
|
Add script to run PBS jobs to collect all data to fragment database
|
Add script to run PBS jobs to collect all data to fragment database
|
Python
|
bsd-3-clause
|
DrrDom/crem,DrrDom/crem
|
Add script to run PBS jobs to collect all data to fragment database
|
#!/usr/bin/env python3
#==============================================================================
# author : Pavel Polishchuk
# date : 20-08-2018
# version :
# python_version :
# copyright : Pavel Polishchuk 2018
# license :
#==============================================================================
import argparse
import os
import re
from subprocess import Popen
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create fragment DB from sorted text files with context and fragments.')
parser.add_argument('-d', '--dir', metavar='DIR', required=True,
help='dir where input files are stored.')
args = vars(parser.parse_args())
for o, v in args.items():
if o == "dir": output_dir = os.path.abspath(v)
job_dir = os.path.join(output_dir, 'jobs')
if not os.path.exists(job_dir):
os.mkdir(job_dir)
pbs_name = os.path.join(job_dir, 'db_gen_%s.pbs' % (os.path.basename(output_dir)))
script = """
#!/usr/bin/env bash
#PBS -l select=1:ncpus=32
#PBS -k oe
cd %s
""" % output_dir
for fname in sorted(os.listdir(output_dir)):
if re.search('^r[1-5]_c\.txt$', fname):
script += 'python3 ~/python/crem/import_env_to_db.py -i %s -o replacements.db -t radius%s -c\n' % (fname, fname[1])
with open(pbs_name, "wt") as f:
f.write(script)
p = Popen(['qsub', pbs_name], encoding='utf8')
|
<commit_before><commit_msg>Add script to run PBS jobs to collect all data to fragment database<commit_after>
|
#!/usr/bin/env python3
#==============================================================================
# author : Pavel Polishchuk
# date : 20-08-2018
# version :
# python_version :
# copyright : Pavel Polishchuk 2018
# license :
#==============================================================================
import argparse
import os
import re
from subprocess import Popen
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create fragment DB from sorted text files with context and fragments.')
parser.add_argument('-d', '--dir', metavar='DIR', required=True,
help='dir where input files are stored.')
args = vars(parser.parse_args())
for o, v in args.items():
if o == "dir": output_dir = os.path.abspath(v)
job_dir = os.path.join(output_dir, 'jobs')
if not os.path.exists(job_dir):
os.mkdir(job_dir)
pbs_name = os.path.join(job_dir, 'db_gen_%s.pbs' % (os.path.basename(output_dir)))
script = """
#!/usr/bin/env bash
#PBS -l select=1:ncpus=32
#PBS -k oe
cd %s
""" % output_dir
for fname in sorted(os.listdir(output_dir)):
if re.search('^r[1-5]_c\.txt$', fname):
script += 'python3 ~/python/crem/import_env_to_db.py -i %s -o replacements.db -t radius%s -c\n' % (fname, fname[1])
with open(pbs_name, "wt") as f:
f.write(script)
p = Popen(['qsub', pbs_name], encoding='utf8')
|
Add script to run PBS jobs to collect all data to fragment database#!/usr/bin/env python3
#==============================================================================
# author : Pavel Polishchuk
# date : 20-08-2018
# version :
# python_version :
# copyright : Pavel Polishchuk 2018
# license :
#==============================================================================
import argparse
import os
import re
from subprocess import Popen
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create fragment DB from sorted text files with context and fragments.')
parser.add_argument('-d', '--dir', metavar='DIR', required=True,
help='dir where input files are stored.')
args = vars(parser.parse_args())
for o, v in args.items():
if o == "dir": output_dir = os.path.abspath(v)
job_dir = os.path.join(output_dir, 'jobs')
if not os.path.exists(job_dir):
os.mkdir(job_dir)
pbs_name = os.path.join(job_dir, 'db_gen_%s.pbs' % (os.path.basename(output_dir)))
script = """
#!/usr/bin/env bash
#PBS -l select=1:ncpus=32
#PBS -k oe
cd %s
""" % output_dir
for fname in sorted(os.listdir(output_dir)):
if re.search('^r[1-5]_c\.txt$', fname):
script += 'python3 ~/python/crem/import_env_to_db.py -i %s -o replacements.db -t radius%s -c\n' % (fname, fname[1])
with open(pbs_name, "wt") as f:
f.write(script)
p = Popen(['qsub', pbs_name], encoding='utf8')
|
<commit_before><commit_msg>Add script to run PBS jobs to collect all data to fragment database<commit_after>#!/usr/bin/env python3
#==============================================================================
# author : Pavel Polishchuk
# date : 20-08-2018
# version :
# python_version :
# copyright : Pavel Polishchuk 2018
# license :
#==============================================================================
import argparse
import os
import re
from subprocess import Popen
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create fragment DB from sorted text files with context and fragments.')
parser.add_argument('-d', '--dir', metavar='DIR', required=True,
help='dir where input files are stored.')
args = vars(parser.parse_args())
for o, v in args.items():
if o == "dir": output_dir = os.path.abspath(v)
job_dir = os.path.join(output_dir, 'jobs')
if not os.path.exists(job_dir):
os.mkdir(job_dir)
pbs_name = os.path.join(job_dir, 'db_gen_%s.pbs' % (os.path.basename(output_dir)))
script = """
#!/usr/bin/env bash
#PBS -l select=1:ncpus=32
#PBS -k oe
cd %s
""" % output_dir
for fname in sorted(os.listdir(output_dir)):
if re.search('^r[1-5]_c\.txt$', fname):
script += 'python3 ~/python/crem/import_env_to_db.py -i %s -o replacements.db -t radius%s -c\n' % (fname, fname[1])
with open(pbs_name, "wt") as f:
f.write(script)
p = Popen(['qsub', pbs_name], encoding='utf8')
|
|
fefd4db49bf5590a1d95eb59ae6de9a63131f5ef
|
mutation/mutation_results.py
|
mutation/mutation_results.py
|
import matplotlib.pyplot as plt
import numpy
class Result:
def __init__(self, mut_type, schemes, results):
self.mut_type = mut_type
self.schemes = schemes
self.results = results
def __repr__(self):
return "result - " + str(self.mut_type) + "-" + str(self.results)
class MutationResults:
def __init__(self, sbfl_dir):
self.sbfl_dir = sbfl_dir
self.trans_array = ["F2P", "RSS", "UML2ER", "GM", "Kiltera"]
def calc(self):
for trans in self.trans_array:
results_dir = "models_" + trans + "/results/"
results_file = self.sbfl_dir + results_dir + "results_AC.csv"
print("Opening file: " + results_file)
results_arr = []
schemes = []
first_line = True
with open(results_file) as f:
for line in f:
if first_line:
first_line = False
for s in line.split(";"):
if not s:
continue
if "PCs" in s:
break
schemes.append(s)
continue
tokens = line.split(";")
mut_type = tokens[0].split("'")[1]
results = {}
for i, t in enumerate(tokens):
if i == 0:
continue
if not t.strip():
break
scheme = schemes[i-1]
results[scheme] = t
result = Result(mut_type, schemes, results)
results_arr.append(result)
self.parse_results(trans, schemes, results_arr)
def parse_results(self, trans, schemes, results_arr):
print("For trans: " + trans)
bins = list(numpy.arange(0, 1, 0.1))
# order schemes by avg
schemes_with_avg = []
for scheme in schemes:
scores = [float(result.results[scheme]) for result in results_arr]
schemes_with_avg.append((scheme, numpy.average(scores)))
schemes_with_avg.sort(key=lambda t: t[1])
print(schemes_with_avg)
fig, axs = plt.subplots(len(schemes))
fig.suptitle("Scores for " + trans)
for i, scheme_and_score in enumerate(schemes_with_avg):
scheme, avg = scheme_and_score
scores = [float(result.results[scheme]) for result in results_arr]
print("Scheme: " + scheme + " - " + str(avg))
print(scores)
axs[i].title.set_text(scheme)
axs[i].hist(scores, bins=bins)
plt.show()
if __name__ == "__main__":
sbfl_dir = "/home/xubuntu/Projects/SBFL/"
mr = MutationResults(sbfl_dir)
mr.calc()
|
Add script for parsing mutation results.
|
Add script for parsing mutation results.
|
Python
|
mit
|
levilucio/SyVOLT,levilucio/SyVOLT
|
Add script for parsing mutation results.
|
import matplotlib.pyplot as plt
import numpy
class Result:
def __init__(self, mut_type, schemes, results):
self.mut_type = mut_type
self.schemes = schemes
self.results = results
def __repr__(self):
return "result - " + str(self.mut_type) + "-" + str(self.results)
class MutationResults:
def __init__(self, sbfl_dir):
self.sbfl_dir = sbfl_dir
self.trans_array = ["F2P", "RSS", "UML2ER", "GM", "Kiltera"]
def calc(self):
for trans in self.trans_array:
results_dir = "models_" + trans + "/results/"
results_file = self.sbfl_dir + results_dir + "results_AC.csv"
print("Opening file: " + results_file)
results_arr = []
schemes = []
first_line = True
with open(results_file) as f:
for line in f:
if first_line:
first_line = False
for s in line.split(";"):
if not s:
continue
if "PCs" in s:
break
schemes.append(s)
continue
tokens = line.split(";")
mut_type = tokens[0].split("'")[1]
results = {}
for i, t in enumerate(tokens):
if i == 0:
continue
if not t.strip():
break
scheme = schemes[i-1]
results[scheme] = t
result = Result(mut_type, schemes, results)
results_arr.append(result)
self.parse_results(trans, schemes, results_arr)
def parse_results(self, trans, schemes, results_arr):
print("For trans: " + trans)
bins = list(numpy.arange(0, 1, 0.1))
# order schemes by avg
schemes_with_avg = []
for scheme in schemes:
scores = [float(result.results[scheme]) for result in results_arr]
schemes_with_avg.append((scheme, numpy.average(scores)))
schemes_with_avg.sort(key=lambda t: t[1])
print(schemes_with_avg)
fig, axs = plt.subplots(len(schemes))
fig.suptitle("Scores for " + trans)
for i, scheme_and_score in enumerate(schemes_with_avg):
scheme, avg = scheme_and_score
scores = [float(result.results[scheme]) for result in results_arr]
print("Scheme: " + scheme + " - " + str(avg))
print(scores)
axs[i].title.set_text(scheme)
axs[i].hist(scores, bins=bins)
plt.show()
if __name__ == "__main__":
sbfl_dir = "/home/xubuntu/Projects/SBFL/"
mr = MutationResults(sbfl_dir)
mr.calc()
|
<commit_before><commit_msg>Add script for parsing mutation results.<commit_after>
|
import matplotlib.pyplot as plt
import numpy
class Result:
def __init__(self, mut_type, schemes, results):
self.mut_type = mut_type
self.schemes = schemes
self.results = results
def __repr__(self):
return "result - " + str(self.mut_type) + "-" + str(self.results)
class MutationResults:
def __init__(self, sbfl_dir):
self.sbfl_dir = sbfl_dir
self.trans_array = ["F2P", "RSS", "UML2ER", "GM", "Kiltera"]
def calc(self):
for trans in self.trans_array:
results_dir = "models_" + trans + "/results/"
results_file = self.sbfl_dir + results_dir + "results_AC.csv"
print("Opening file: " + results_file)
results_arr = []
schemes = []
first_line = True
with open(results_file) as f:
for line in f:
if first_line:
first_line = False
for s in line.split(";"):
if not s:
continue
if "PCs" in s:
break
schemes.append(s)
continue
tokens = line.split(";")
mut_type = tokens[0].split("'")[1]
results = {}
for i, t in enumerate(tokens):
if i == 0:
continue
if not t.strip():
break
scheme = schemes[i-1]
results[scheme] = t
result = Result(mut_type, schemes, results)
results_arr.append(result)
self.parse_results(trans, schemes, results_arr)
def parse_results(self, trans, schemes, results_arr):
print("For trans: " + trans)
bins = list(numpy.arange(0, 1, 0.1))
# order schemes by avg
schemes_with_avg = []
for scheme in schemes:
scores = [float(result.results[scheme]) for result in results_arr]
schemes_with_avg.append((scheme, numpy.average(scores)))
schemes_with_avg.sort(key=lambda t: t[1])
print(schemes_with_avg)
fig, axs = plt.subplots(len(schemes))
fig.suptitle("Scores for " + trans)
for i, scheme_and_score in enumerate(schemes_with_avg):
scheme, avg = scheme_and_score
scores = [float(result.results[scheme]) for result in results_arr]
print("Scheme: " + scheme + " - " + str(avg))
print(scores)
axs[i].title.set_text(scheme)
axs[i].hist(scores, bins=bins)
plt.show()
if __name__ == "__main__":
sbfl_dir = "/home/xubuntu/Projects/SBFL/"
mr = MutationResults(sbfl_dir)
mr.calc()
|
Add script for parsing mutation results.import matplotlib.pyplot as plt
import numpy
class Result:
def __init__(self, mut_type, schemes, results):
self.mut_type = mut_type
self.schemes = schemes
self.results = results
def __repr__(self):
return "result - " + str(self.mut_type) + "-" + str(self.results)
class MutationResults:
def __init__(self, sbfl_dir):
self.sbfl_dir = sbfl_dir
self.trans_array = ["F2P", "RSS", "UML2ER", "GM", "Kiltera"]
def calc(self):
for trans in self.trans_array:
results_dir = "models_" + trans + "/results/"
results_file = self.sbfl_dir + results_dir + "results_AC.csv"
print("Opening file: " + results_file)
results_arr = []
schemes = []
first_line = True
with open(results_file) as f:
for line in f:
if first_line:
first_line = False
for s in line.split(";"):
if not s:
continue
if "PCs" in s:
break
schemes.append(s)
continue
tokens = line.split(";")
mut_type = tokens[0].split("'")[1]
results = {}
for i, t in enumerate(tokens):
if i == 0:
continue
if not t.strip():
break
scheme = schemes[i-1]
results[scheme] = t
result = Result(mut_type, schemes, results)
results_arr.append(result)
self.parse_results(trans, schemes, results_arr)
def parse_results(self, trans, schemes, results_arr):
print("For trans: " + trans)
bins = list(numpy.arange(0, 1, 0.1))
# order schemes by avg
schemes_with_avg = []
for scheme in schemes:
scores = [float(result.results[scheme]) for result in results_arr]
schemes_with_avg.append((scheme, numpy.average(scores)))
schemes_with_avg.sort(key=lambda t: t[1])
print(schemes_with_avg)
fig, axs = plt.subplots(len(schemes))
fig.suptitle("Scores for " + trans)
for i, scheme_and_score in enumerate(schemes_with_avg):
scheme, avg = scheme_and_score
scores = [float(result.results[scheme]) for result in results_arr]
print("Scheme: " + scheme + " - " + str(avg))
print(scores)
axs[i].title.set_text(scheme)
axs[i].hist(scores, bins=bins)
plt.show()
if __name__ == "__main__":
sbfl_dir = "/home/xubuntu/Projects/SBFL/"
mr = MutationResults(sbfl_dir)
mr.calc()
|
<commit_before><commit_msg>Add script for parsing mutation results.<commit_after>import matplotlib.pyplot as plt
import numpy
class Result:
def __init__(self, mut_type, schemes, results):
self.mut_type = mut_type
self.schemes = schemes
self.results = results
def __repr__(self):
return "result - " + str(self.mut_type) + "-" + str(self.results)
class MutationResults:
def __init__(self, sbfl_dir):
self.sbfl_dir = sbfl_dir
self.trans_array = ["F2P", "RSS", "UML2ER", "GM", "Kiltera"]
def calc(self):
for trans in self.trans_array:
results_dir = "models_" + trans + "/results/"
results_file = self.sbfl_dir + results_dir + "results_AC.csv"
print("Opening file: " + results_file)
results_arr = []
schemes = []
first_line = True
with open(results_file) as f:
for line in f:
if first_line:
first_line = False
for s in line.split(";"):
if not s:
continue
if "PCs" in s:
break
schemes.append(s)
continue
tokens = line.split(";")
mut_type = tokens[0].split("'")[1]
results = {}
for i, t in enumerate(tokens):
if i == 0:
continue
if not t.strip():
break
scheme = schemes[i-1]
results[scheme] = t
result = Result(mut_type, schemes, results)
results_arr.append(result)
self.parse_results(trans, schemes, results_arr)
def parse_results(self, trans, schemes, results_arr):
print("For trans: " + trans)
bins = list(numpy.arange(0, 1, 0.1))
# order schemes by avg
schemes_with_avg = []
for scheme in schemes:
scores = [float(result.results[scheme]) for result in results_arr]
schemes_with_avg.append((scheme, numpy.average(scores)))
schemes_with_avg.sort(key=lambda t: t[1])
print(schemes_with_avg)
fig, axs = plt.subplots(len(schemes))
fig.suptitle("Scores for " + trans)
for i, scheme_and_score in enumerate(schemes_with_avg):
scheme, avg = scheme_and_score
scores = [float(result.results[scheme]) for result in results_arr]
print("Scheme: " + scheme + " - " + str(avg))
print(scores)
axs[i].title.set_text(scheme)
axs[i].hist(scores, bins=bins)
plt.show()
if __name__ == "__main__":
sbfl_dir = "/home/xubuntu/Projects/SBFL/"
mr = MutationResults(sbfl_dir)
mr.calc()
|
|
7079614f35de60def5f4e1cc1cb17cf3e5b4d9c6
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from distutils.core import setup
import os
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='facebook-sdk',
version='0.3.2',
description='This client library is designed to support the Facebook '
'Graph API and the official Facebook JavaScript SDK, which '
'is the canonical way to implement Facebook authentication.',
author='Facebook',
maintainer='Martey Dodoo',
maintainer_email='facebook-sdk@marteydodoo.com',
url='https://github.com/pythonforfacebook/facebook-sdk',
license='Apache',
py_modules=[
'facebook',
],
long_description=read("README.rst"),
classifiers=[
'License :: OSI Approved :: Apache Software License',
],
)
|
#!/usr/bin/env python
from distutils.core import setup
setup(
name='facebook-sdk',
version='0.3.2',
description='This client library is designed to support the Facebook '
'Graph API and the official Facebook JavaScript SDK, which '
'is the canonical way to implement Facebook authentication.',
author='Facebook',
maintainer='Martey Dodoo',
maintainer_email='facebook-sdk@marteydodoo.com',
url='https://github.com/pythonforfacebook/facebook-sdk',
license='Apache',
py_modules=[
'facebook',
],
long_description=open("README.rst").read(),
classifiers=[
'License :: OSI Approved :: Apache Software License',
],
)
|
Change way README is imported.
|
Change way README is imported.
The custom read function is unnecessary since only one file is being
accessed. Removing it reduces the amount of code.
|
Python
|
apache-2.0
|
Aloomaio/facebook-sdk,mobolic/facebook-sdk
|
#!/usr/bin/env python
from distutils.core import setup
import os
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='facebook-sdk',
version='0.3.2',
description='This client library is designed to support the Facebook '
'Graph API and the official Facebook JavaScript SDK, which '
'is the canonical way to implement Facebook authentication.',
author='Facebook',
maintainer='Martey Dodoo',
maintainer_email='facebook-sdk@marteydodoo.com',
url='https://github.com/pythonforfacebook/facebook-sdk',
license='Apache',
py_modules=[
'facebook',
],
long_description=read("README.rst"),
classifiers=[
'License :: OSI Approved :: Apache Software License',
],
)
Change way README is imported.
The custom read function is unnecessary since only one file is being
accessed. Removing it reduces the amount of code.
|
#!/usr/bin/env python
from distutils.core import setup
setup(
name='facebook-sdk',
version='0.3.2',
description='This client library is designed to support the Facebook '
'Graph API and the official Facebook JavaScript SDK, which '
'is the canonical way to implement Facebook authentication.',
author='Facebook',
maintainer='Martey Dodoo',
maintainer_email='facebook-sdk@marteydodoo.com',
url='https://github.com/pythonforfacebook/facebook-sdk',
license='Apache',
py_modules=[
'facebook',
],
long_description=open("README.rst").read(),
classifiers=[
'License :: OSI Approved :: Apache Software License',
],
)
|
<commit_before>#!/usr/bin/env python
from distutils.core import setup
import os
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='facebook-sdk',
version='0.3.2',
description='This client library is designed to support the Facebook '
'Graph API and the official Facebook JavaScript SDK, which '
'is the canonical way to implement Facebook authentication.',
author='Facebook',
maintainer='Martey Dodoo',
maintainer_email='facebook-sdk@marteydodoo.com',
url='https://github.com/pythonforfacebook/facebook-sdk',
license='Apache',
py_modules=[
'facebook',
],
long_description=read("README.rst"),
classifiers=[
'License :: OSI Approved :: Apache Software License',
],
)
<commit_msg>Change way README is imported.
The custom read function is unnecessary since only one file is being
accessed. Removing it reduces the amount of code.<commit_after>
|
#!/usr/bin/env python
from distutils.core import setup
setup(
name='facebook-sdk',
version='0.3.2',
description='This client library is designed to support the Facebook '
'Graph API and the official Facebook JavaScript SDK, which '
'is the canonical way to implement Facebook authentication.',
author='Facebook',
maintainer='Martey Dodoo',
maintainer_email='facebook-sdk@marteydodoo.com',
url='https://github.com/pythonforfacebook/facebook-sdk',
license='Apache',
py_modules=[
'facebook',
],
long_description=open("README.rst").read(),
classifiers=[
'License :: OSI Approved :: Apache Software License',
],
)
|
#!/usr/bin/env python
from distutils.core import setup
import os
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='facebook-sdk',
version='0.3.2',
description='This client library is designed to support the Facebook '
'Graph API and the official Facebook JavaScript SDK, which '
'is the canonical way to implement Facebook authentication.',
author='Facebook',
maintainer='Martey Dodoo',
maintainer_email='facebook-sdk@marteydodoo.com',
url='https://github.com/pythonforfacebook/facebook-sdk',
license='Apache',
py_modules=[
'facebook',
],
long_description=read("README.rst"),
classifiers=[
'License :: OSI Approved :: Apache Software License',
],
)
Change way README is imported.
The custom read function is unnecessary since only one file is being
accessed. Removing it reduces the amount of code.#!/usr/bin/env python
from distutils.core import setup
setup(
name='facebook-sdk',
version='0.3.2',
description='This client library is designed to support the Facebook '
'Graph API and the official Facebook JavaScript SDK, which '
'is the canonical way to implement Facebook authentication.',
author='Facebook',
maintainer='Martey Dodoo',
maintainer_email='facebook-sdk@marteydodoo.com',
url='https://github.com/pythonforfacebook/facebook-sdk',
license='Apache',
py_modules=[
'facebook',
],
long_description=open("README.rst").read(),
classifiers=[
'License :: OSI Approved :: Apache Software License',
],
)
|
<commit_before>#!/usr/bin/env python
from distutils.core import setup
import os
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='facebook-sdk',
version='0.3.2',
description='This client library is designed to support the Facebook '
'Graph API and the official Facebook JavaScript SDK, which '
'is the canonical way to implement Facebook authentication.',
author='Facebook',
maintainer='Martey Dodoo',
maintainer_email='facebook-sdk@marteydodoo.com',
url='https://github.com/pythonforfacebook/facebook-sdk',
license='Apache',
py_modules=[
'facebook',
],
long_description=read("README.rst"),
classifiers=[
'License :: OSI Approved :: Apache Software License',
],
)
<commit_msg>Change way README is imported.
The custom read function is unnecessary since only one file is being
accessed. Removing it reduces the amount of code.<commit_after>#!/usr/bin/env python
from distutils.core import setup
setup(
name='facebook-sdk',
version='0.3.2',
description='This client library is designed to support the Facebook '
'Graph API and the official Facebook JavaScript SDK, which '
'is the canonical way to implement Facebook authentication.',
author='Facebook',
maintainer='Martey Dodoo',
maintainer_email='facebook-sdk@marteydodoo.com',
url='https://github.com/pythonforfacebook/facebook-sdk',
license='Apache',
py_modules=[
'facebook',
],
long_description=open("README.rst").read(),
classifiers=[
'License :: OSI Approved :: Apache Software License',
],
)
|
b14d8021dc936be4bb2d8684f1268f8422953fac
|
dump-classes.py
|
dump-classes.py
|
#!/usr/bin/python
from subprocess import call
import glob
import os
import sys
import fileinput
import re
destination_path = 'Multiplex/FrameworkHeaders/'
def dump_all_frameworks():
# 3 different directories contain all of the frameworks a plugin may interface with.
# They're located at {APP_DIR}/Contents/
shared_frameworks = ['DVTFoundation', 'DVTKit']
frameworks = ['IDEFoundation']
other_frameworks = ['']
for framework in shared_frameworks:
dump_framework(frameworkPath('SharedFrameworks', framework), frameworkDumpDestination(framework))
cleanup_dumped_files()
def frameworkPath(frameworkDir, frameworkName):
framework_root_directory = '/Applications/Xcode-beta.app/Contents/'
return framework_root_directory + frameworkDir + '/' + frameworkName + '.framework/' + frameworkName
def frameworkDumpDestination(frameworkName):
return destination_path + frameworkName
def dump_framework(path, destinationDir):
call(['class-dump', path, '-H', '-s', '-o', destinationDir])
def cleanup_dumped_files():
relative_paths = glob.glob(destination_path + '/*/*.h')
for relativePath in relative_paths:
absolute_path = os.path.abspath(relativePath)
cleanFile(absolute_path)
def cleanFile(filePath):
tempName = filePath + '.tmp'
inputFile = open(filePath)
outputFile = open(tempName, 'w')
fileContent = unicode(inputFile.read(), "utf-8")
# Remove Foundation imports
outText = re.sub('#import "NS(.*?).h"\n', '', fileContent)
# Remove .cxx_destructs
outText = re.sub('- \(void\).cxx_destruct;\n', '', outText)
# Fix delegate imports
outText = re.sub('.h"', '-Protocol.h"', outText)
# Add import for structs
outText = re.sub('//\n\n', '//\n\n#import "CDStructures.h"\n', outText)
# Change the unknown block type to a generic block that doesn't need an import
outText = re.sub('CDUnknownBlockType', 'dispatch_block_t', outText)
# Remove protocols from ivars as they're not supported
outText = re.sub('<(.*?)> (\*|)', ' ' + r"\2", outText)
outputFile.write((outText.encode("utf-8")))
outputFile.close()
inputFile.close()
os.rename(tempName, filePath)
dump_all_frameworks()
|
Add class dump automation script
|
Add class dump automation script
|
Python
|
mit
|
kolinkrewinkel/Multiplex,kolinkrewinkel/Multiplex,kolinkrewinkel/Multiplex
|
Add class dump automation script
|
#!/usr/bin/python
from subprocess import call
import glob
import os
import sys
import fileinput
import re
destination_path = 'Multiplex/FrameworkHeaders/'
def dump_all_frameworks():
# 3 different directories contain all of the frameworks a plugin may interface with.
# They're located at {APP_DIR}/Contents/
shared_frameworks = ['DVTFoundation', 'DVTKit']
frameworks = ['IDEFoundation']
other_frameworks = ['']
for framework in shared_frameworks:
dump_framework(frameworkPath('SharedFrameworks', framework), frameworkDumpDestination(framework))
cleanup_dumped_files()
def frameworkPath(frameworkDir, frameworkName):
framework_root_directory = '/Applications/Xcode-beta.app/Contents/'
return framework_root_directory + frameworkDir + '/' + frameworkName + '.framework/' + frameworkName
def frameworkDumpDestination(frameworkName):
return destination_path + frameworkName
def dump_framework(path, destinationDir):
call(['class-dump', path, '-H', '-s', '-o', destinationDir])
def cleanup_dumped_files():
relative_paths = glob.glob(destination_path + '/*/*.h')
for relativePath in relative_paths:
absolute_path = os.path.abspath(relativePath)
cleanFile(absolute_path)
def cleanFile(filePath):
tempName = filePath + '.tmp'
inputFile = open(filePath)
outputFile = open(tempName, 'w')
fileContent = unicode(inputFile.read(), "utf-8")
# Remove Foundation imports
outText = re.sub('#import "NS(.*?).h"\n', '', fileContent)
# Remove .cxx_destructs
outText = re.sub('- \(void\).cxx_destruct;\n', '', outText)
# Fix delegate imports
outText = re.sub('.h"', '-Protocol.h"', outText)
# Add import for structs
outText = re.sub('//\n\n', '//\n\n#import "CDStructures.h"\n', outText)
# Change the unknown block type to a generic block that doesn't need an import
outText = re.sub('CDUnknownBlockType', 'dispatch_block_t', outText)
# Remove protocols from ivars as they're not supported
outText = re.sub('<(.*?)> (\*|)', ' ' + r"\2", outText)
outputFile.write((outText.encode("utf-8")))
outputFile.close()
inputFile.close()
os.rename(tempName, filePath)
dump_all_frameworks()
|
<commit_before><commit_msg>Add class dump automation script<commit_after>
|
#!/usr/bin/python
from subprocess import call
import glob
import os
import sys
import fileinput
import re
destination_path = 'Multiplex/FrameworkHeaders/'
def dump_all_frameworks():
# 3 different directories contain all of the frameworks a plugin may interface with.
# They're located at {APP_DIR}/Contents/
shared_frameworks = ['DVTFoundation', 'DVTKit']
frameworks = ['IDEFoundation']
other_frameworks = ['']
for framework in shared_frameworks:
dump_framework(frameworkPath('SharedFrameworks', framework), frameworkDumpDestination(framework))
cleanup_dumped_files()
def frameworkPath(frameworkDir, frameworkName):
framework_root_directory = '/Applications/Xcode-beta.app/Contents/'
return framework_root_directory + frameworkDir + '/' + frameworkName + '.framework/' + frameworkName
def frameworkDumpDestination(frameworkName):
return destination_path + frameworkName
def dump_framework(path, destinationDir):
call(['class-dump', path, '-H', '-s', '-o', destinationDir])
def cleanup_dumped_files():
relative_paths = glob.glob(destination_path + '/*/*.h')
for relativePath in relative_paths:
absolute_path = os.path.abspath(relativePath)
cleanFile(absolute_path)
def cleanFile(filePath):
tempName = filePath + '.tmp'
inputFile = open(filePath)
outputFile = open(tempName, 'w')
fileContent = unicode(inputFile.read(), "utf-8")
# Remove Foundation imports
outText = re.sub('#import "NS(.*?).h"\n', '', fileContent)
# Remove .cxx_destructs
outText = re.sub('- \(void\).cxx_destruct;\n', '', outText)
# Fix delegate imports
outText = re.sub('.h"', '-Protocol.h"', outText)
# Add import for structs
outText = re.sub('//\n\n', '//\n\n#import "CDStructures.h"\n', outText)
# Change the unknown block type to a generic block that doesn't need an import
outText = re.sub('CDUnknownBlockType', 'dispatch_block_t', outText)
# Remove protocols from ivars as they're not supported
outText = re.sub('<(.*?)> (\*|)', ' ' + r"\2", outText)
outputFile.write((outText.encode("utf-8")))
outputFile.close()
inputFile.close()
os.rename(tempName, filePath)
dump_all_frameworks()
|
Add class dump automation script#!/usr/bin/python
from subprocess import call
import glob
import os
import sys
import fileinput
import re
destination_path = 'Multiplex/FrameworkHeaders/'
def dump_all_frameworks():
# 3 different directories contain all of the frameworks a plugin may interface with.
# They're located at {APP_DIR}/Contents/
shared_frameworks = ['DVTFoundation', 'DVTKit']
frameworks = ['IDEFoundation']
other_frameworks = ['']
for framework in shared_frameworks:
dump_framework(frameworkPath('SharedFrameworks', framework), frameworkDumpDestination(framework))
cleanup_dumped_files()
def frameworkPath(frameworkDir, frameworkName):
framework_root_directory = '/Applications/Xcode-beta.app/Contents/'
return framework_root_directory + frameworkDir + '/' + frameworkName + '.framework/' + frameworkName
def frameworkDumpDestination(frameworkName):
return destination_path + frameworkName
def dump_framework(path, destinationDir):
call(['class-dump', path, '-H', '-s', '-o', destinationDir])
def cleanup_dumped_files():
relative_paths = glob.glob(destination_path + '/*/*.h')
for relativePath in relative_paths:
absolute_path = os.path.abspath(relativePath)
cleanFile(absolute_path)
def cleanFile(filePath):
tempName = filePath + '.tmp'
inputFile = open(filePath)
outputFile = open(tempName, 'w')
fileContent = unicode(inputFile.read(), "utf-8")
# Remove Foundation imports
outText = re.sub('#import "NS(.*?).h"\n', '', fileContent)
# Remove .cxx_destructs
outText = re.sub('- \(void\).cxx_destruct;\n', '', outText)
# Fix delegate imports
outText = re.sub('.h"', '-Protocol.h"', outText)
# Add import for structs
outText = re.sub('//\n\n', '//\n\n#import "CDStructures.h"\n', outText)
# Change the unknown block type to a generic block that doesn't need an import
outText = re.sub('CDUnknownBlockType', 'dispatch_block_t', outText)
# Remove protocols from ivars as they're not supported
outText = re.sub('<(.*?)> (\*|)', ' ' + r"\2", outText)
outputFile.write((outText.encode("utf-8")))
outputFile.close()
inputFile.close()
os.rename(tempName, filePath)
dump_all_frameworks()
|
<commit_before><commit_msg>Add class dump automation script<commit_after>#!/usr/bin/python
from subprocess import call
import glob
import os
import sys
import fileinput
import re
destination_path = 'Multiplex/FrameworkHeaders/'
def dump_all_frameworks():
# 3 different directories contain all of the frameworks a plugin may interface with.
# They're located at {APP_DIR}/Contents/
shared_frameworks = ['DVTFoundation', 'DVTKit']
frameworks = ['IDEFoundation']
other_frameworks = ['']
for framework in shared_frameworks:
dump_framework(frameworkPath('SharedFrameworks', framework), frameworkDumpDestination(framework))
cleanup_dumped_files()
def frameworkPath(frameworkDir, frameworkName):
framework_root_directory = '/Applications/Xcode-beta.app/Contents/'
return framework_root_directory + frameworkDir + '/' + frameworkName + '.framework/' + frameworkName
def frameworkDumpDestination(frameworkName):
return destination_path + frameworkName
def dump_framework(path, destinationDir):
call(['class-dump', path, '-H', '-s', '-o', destinationDir])
def cleanup_dumped_files():
relative_paths = glob.glob(destination_path + '/*/*.h')
for relativePath in relative_paths:
absolute_path = os.path.abspath(relativePath)
cleanFile(absolute_path)
def cleanFile(filePath):
tempName = filePath + '.tmp'
inputFile = open(filePath)
outputFile = open(tempName, 'w')
fileContent = unicode(inputFile.read(), "utf-8")
# Remove Foundation imports
outText = re.sub('#import "NS(.*?).h"\n', '', fileContent)
# Remove .cxx_destructs
outText = re.sub('- \(void\).cxx_destruct;\n', '', outText)
# Fix delegate imports
outText = re.sub('.h"', '-Protocol.h"', outText)
# Add import for structs
outText = re.sub('//\n\n', '//\n\n#import "CDStructures.h"\n', outText)
# Change the unknown block type to a generic block that doesn't need an import
outText = re.sub('CDUnknownBlockType', 'dispatch_block_t', outText)
# Remove protocols from ivars as they're not supported
outText = re.sub('<(.*?)> (\*|)', ' ' + r"\2", outText)
outputFile.write((outText.encode("utf-8")))
outputFile.close()
inputFile.close()
os.rename(tempName, filePath)
dump_all_frameworks()
|
|
333908ed1a01fba4429f417df012b5db5c75b1b4
|
slurp.py
|
slurp.py
|
#!/usr/bin/env python
def log(msg):
print("[+] %s" % msg)
import os
import sys
import bz2
from groundstation.station import Station
from groundstation.node import Node
from groundstation.objects.update_object import UpdateObject
def main():
myself = Node()
station = Station.from_env(myself)
filename = sys.argv[1]
log("Stealing %s" % filename)
with open(filename) as fh:
obj = UpdateObject([],
os.path.basename(filename) +
chr(0x00) +
bz2.compress(fh.read())
)
name = station.write(obj.as_object())
log("Wrote %s into stationdb" % name)
if __name__ == '__main__':
main()
|
Add a script for dumping data into the graphdb
|
Add a script for dumping data into the graphdb
|
Python
|
mit
|
richo/groundstation,richo/groundstation,richo/groundstation,richo/groundstation,richo/groundstation
|
Add a script for dumping data into the graphdb
|
#!/usr/bin/env python
def log(msg):
print("[+] %s" % msg)
import os
import sys
import bz2
from groundstation.station import Station
from groundstation.node import Node
from groundstation.objects.update_object import UpdateObject
def main():
myself = Node()
station = Station.from_env(myself)
filename = sys.argv[1]
log("Stealing %s" % filename)
with open(filename) as fh:
obj = UpdateObject([],
os.path.basename(filename) +
chr(0x00) +
bz2.compress(fh.read())
)
name = station.write(obj.as_object())
log("Wrote %s into stationdb" % name)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script for dumping data into the graphdb<commit_after>
|
#!/usr/bin/env python
def log(msg):
print("[+] %s" % msg)
import os
import sys
import bz2
from groundstation.station import Station
from groundstation.node import Node
from groundstation.objects.update_object import UpdateObject
def main():
myself = Node()
station = Station.from_env(myself)
filename = sys.argv[1]
log("Stealing %s" % filename)
with open(filename) as fh:
obj = UpdateObject([],
os.path.basename(filename) +
chr(0x00) +
bz2.compress(fh.read())
)
name = station.write(obj.as_object())
log("Wrote %s into stationdb" % name)
if __name__ == '__main__':
main()
|
Add a script for dumping data into the graphdb#!/usr/bin/env python
def log(msg):
print("[+] %s" % msg)
import os
import sys
import bz2
from groundstation.station import Station
from groundstation.node import Node
from groundstation.objects.update_object import UpdateObject
def main():
myself = Node()
station = Station.from_env(myself)
filename = sys.argv[1]
log("Stealing %s" % filename)
with open(filename) as fh:
obj = UpdateObject([],
os.path.basename(filename) +
chr(0x00) +
bz2.compress(fh.read())
)
name = station.write(obj.as_object())
log("Wrote %s into stationdb" % name)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script for dumping data into the graphdb<commit_after>#!/usr/bin/env python
def log(msg):
print("[+] %s" % msg)
import os
import sys
import bz2
from groundstation.station import Station
from groundstation.node import Node
from groundstation.objects.update_object import UpdateObject
def main():
myself = Node()
station = Station.from_env(myself)
filename = sys.argv[1]
log("Stealing %s" % filename)
with open(filename) as fh:
obj = UpdateObject([],
os.path.basename(filename) +
chr(0x00) +
bz2.compress(fh.read())
)
name = station.write(obj.as_object())
log("Wrote %s into stationdb" % name)
if __name__ == '__main__':
main()
|
|
4a6eb49a2fd93c22b7e7491d1b225f0dd3b2e078
|
ielex/lexicon/migrations/0092_set_cjc_reliabilities_high.py
|
ielex/lexicon/migrations/0092_set_cjc_reliabilities_high.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def forwards_func(apps, schema_editor):
'''
    I accidentally deleted a citation in production.
This migration adds it again.
'''
# Models to work with:
CognateJudgementCitation = apps.get_model(
'lexicon', 'CognateJudgementCitation')
MeaningList = apps.get_model('lexicon', 'MeaningList')
LanguageList = apps.get_model('lexicon', 'LanguageList')
Lexeme = apps.get_model('lexicon', 'Lexeme')
# Interesting data:
lList = LanguageList.objects.get(name='Current')
mList = MeaningList.objects.get(name='Jena200')
languageIds = lList.languages.values_list('id', flat=True)
meaningIds = mList.meanings.values_list('id', flat=True)
lexemeIds = Lexeme.objects.filter(
language_id__in=languageIds,
meaning_id__in=meaningIds).values_list('id', flat=True)
# Entries to modify:
cjcs = CognateJudgementCitation.objects.filter(
cognate_judgement__lexeme_id__in=lexemeIds)
# Setting reliability to high:
cjcs.update(reliability='A')
def reverse_func(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [('lexicon', '0091_bhojpuri')]
operations = [
migrations.RunPython(forwards_func, reverse_func),
]
|
Set CognateJudgementCitation reliabilities to high
|
Set CognateJudgementCitation reliabilities to high
- This only affects CognateJudgementCitations that link to Lexemes
that belong to Meanings from the `Jena200` list and belong to Languages from the `Current` list.
- Relates to #229
|
Python
|
bsd-2-clause
|
lingdb/CoBL-public,lingdb/CoBL-public,lingdb/CoBL-public,lingdb/CoBL-public
|
Set CognateJudgementCitation reliabilities to high
- This only affects CognateJudgementCitations that link to Lexemes
that belong to Meanings from the `Jena200` list and belong to Languages from the `Current` list.
- Relates to #229
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def forwards_func(apps, schema_editor):
'''
    I accidentally deleted a citation in production.
This migration adds it again.
'''
# Models to work with:
CognateJudgementCitation = apps.get_model(
'lexicon', 'CognateJudgementCitation')
MeaningList = apps.get_model('lexicon', 'MeaningList')
LanguageList = apps.get_model('lexicon', 'LanguageList')
Lexeme = apps.get_model('lexicon', 'Lexeme')
# Interesting data:
lList = LanguageList.objects.get(name='Current')
mList = MeaningList.objects.get(name='Jena200')
languageIds = lList.languages.values_list('id', flat=True)
meaningIds = mList.meanings.values_list('id', flat=True)
lexemeIds = Lexeme.objects.filter(
language_id__in=languageIds,
meaning_id__in=meaningIds).values_list('id', flat=True)
# Entries to modify:
cjcs = CognateJudgementCitation.objects.filter(
cognate_judgement__lexeme_id__in=lexemeIds)
# Setting reliability to high:
cjcs.update(reliability='A')
def reverse_func(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [('lexicon', '0091_bhojpuri')]
operations = [
migrations.RunPython(forwards_func, reverse_func),
]
|
<commit_before><commit_msg>Set CognateJudgementCitation reliabilities to high
- This only affects CognateJudgementCitations that link to Lexemes
that belong to Meanings from the `Jena200` list and belong to Languages from the `Current` list.
- Relates to #229<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def forwards_func(apps, schema_editor):
'''
    I accidentally deleted a citation in production.
This migration adds it again.
'''
# Models to work with:
CognateJudgementCitation = apps.get_model(
'lexicon', 'CognateJudgementCitation')
MeaningList = apps.get_model('lexicon', 'MeaningList')
LanguageList = apps.get_model('lexicon', 'LanguageList')
Lexeme = apps.get_model('lexicon', 'Lexeme')
# Interesting data:
lList = LanguageList.objects.get(name='Current')
mList = MeaningList.objects.get(name='Jena200')
languageIds = lList.languages.values_list('id', flat=True)
meaningIds = mList.meanings.values_list('id', flat=True)
lexemeIds = Lexeme.objects.filter(
language_id__in=languageIds,
meaning_id__in=meaningIds).values_list('id', flat=True)
# Entries to modify:
cjcs = CognateJudgementCitation.objects.filter(
cognate_judgement__lexeme_id__in=lexemeIds)
# Setting reliability to high:
cjcs.update(reliability='A')
def reverse_func(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [('lexicon', '0091_bhojpuri')]
operations = [
migrations.RunPython(forwards_func, reverse_func),
]
|
Set CognateJudgementCitation reliabilities to high
- This only affects CognateJudgementCitations that link to Lexemes
that belong to Meanings from the `Jena200` list and belong to Languages from the `Current` list.
- Relates to #229# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def forwards_func(apps, schema_editor):
'''
    I accidentally deleted a citation in production.
This migration adds it again.
'''
# Models to work with:
CognateJudgementCitation = apps.get_model(
'lexicon', 'CognateJudgementCitation')
MeaningList = apps.get_model('lexicon', 'MeaningList')
LanguageList = apps.get_model('lexicon', 'LanguageList')
Lexeme = apps.get_model('lexicon', 'Lexeme')
# Interesting data:
lList = LanguageList.objects.get(name='Current')
mList = MeaningList.objects.get(name='Jena200')
languageIds = lList.languages.values_list('id', flat=True)
meaningIds = mList.meanings.values_list('id', flat=True)
lexemeIds = Lexeme.objects.filter(
language_id__in=languageIds,
meaning_id__in=meaningIds).values_list('id', flat=True)
# Entries to modify:
cjcs = CognateJudgementCitation.objects.filter(
cognate_judgement__lexeme_id__in=lexemeIds)
# Setting reliability to high:
cjcs.update(reliability='A')
def reverse_func(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [('lexicon', '0091_bhojpuri')]
operations = [
migrations.RunPython(forwards_func, reverse_func),
]
|
<commit_before><commit_msg>Set CognateJudgementCitation reliabilities to high
- This only affects CognateJudgementCitations that link to Lexemes
that belong to Meanings from the `Jena200` list and belong to Languages from the `Current` list.
- Relates to #229<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def forwards_func(apps, schema_editor):
'''
    I accidentally deleted a citation in production.
This migration adds it again.
'''
# Models to work with:
CognateJudgementCitation = apps.get_model(
'lexicon', 'CognateJudgementCitation')
MeaningList = apps.get_model('lexicon', 'MeaningList')
LanguageList = apps.get_model('lexicon', 'LanguageList')
Lexeme = apps.get_model('lexicon', 'Lexeme')
# Interesting data:
lList = LanguageList.objects.get(name='Current')
mList = MeaningList.objects.get(name='Jena200')
languageIds = lList.languages.values_list('id', flat=True)
meaningIds = mList.meanings.values_list('id', flat=True)
lexemeIds = Lexeme.objects.filter(
language_id__in=languageIds,
meaning_id__in=meaningIds).values_list('id', flat=True)
# Entries to modify:
cjcs = CognateJudgementCitation.objects.filter(
cognate_judgement__lexeme_id__in=lexemeIds)
# Setting reliability to high:
cjcs.update(reliability='A')
def reverse_func(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [('lexicon', '0091_bhojpuri')]
operations = [
migrations.RunPython(forwards_func, reverse_func),
]
|
|
2bf24460f0ffc1b1c99c110bd552e1c0e59d6d87
|
byceps/services/shop/order/ordered_articles_service.py
|
byceps/services/shop/order/ordered_articles_service.py
|
"""
byceps.services.shop.order.ordered_articles_service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from collections import Counter
from typing import Dict, Sequence
from ....database import db
from ..article.models.article import ArticleNumber
from .models.order_item import OrderItem
from .models.payment import PaymentState
def count_ordered_articles(article_number: ArticleNumber
) -> Dict[PaymentState, int]:
"""Count how often the article has been ordered, grouped by the
order's payment state.
"""
order_items = OrderItem.query \
.filter_by(article_number=article_number) \
.options(
db.joinedload('order'),
db.joinedload('article'),
) \
.all()
# Ensure every payment state is present in the resulting dictionary,
# even if no orders of the corresponding payment state exist for the
# article.
counter = Counter({state: 0 for state in PaymentState})
for order_item in order_items:
counter[order_item.order.payment_state] += order_item.quantity
return dict(counter)
def get_order_items_for_article(article_number: ArticleNumber
) -> Sequence[OrderItem]:
"""Return all order items for that article."""
return OrderItem.query \
.filter_by(article_number=article_number) \
.all()
|
"""
byceps.services.shop.order.ordered_articles_service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from collections import Counter
from typing import Dict, Sequence
from ....database import db
from ..article.models.article import ArticleNumber
from .models.order_item import OrderItem, OrderItemTuple
from .models.payment import PaymentState
def count_ordered_articles(article_number: ArticleNumber
) -> Dict[PaymentState, int]:
"""Count how often the article has been ordered, grouped by the
order's payment state.
"""
order_items = OrderItem.query \
.filter_by(article_number=article_number) \
.options(
db.joinedload('order'),
db.joinedload('article'),
) \
.all()
# Ensure every payment state is present in the resulting dictionary,
# even if no orders of the corresponding payment state exist for the
# article.
counter = Counter({state: 0 for state in PaymentState})
for order_item in order_items:
counter[order_item.order.payment_state] += order_item.quantity
return dict(counter)
def get_order_items_for_article(article_number: ArticleNumber
) -> Sequence[OrderItemTuple]:
"""Return all order items for that article."""
order_items = OrderItem.query \
.filter_by(article_number=article_number) \
.all()
return [item.to_tuple() for item in order_items]
|
Return `OrderItemTuple`s rather than order item database entities from service
|
Return `OrderItemTuple`s rather than order item database entities from service
|
Python
|
bsd-3-clause
|
homeworkprod/byceps,homeworkprod/byceps,m-ober/byceps,m-ober/byceps,homeworkprod/byceps,m-ober/byceps
|
"""
byceps.services.shop.order.ordered_articles_service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from collections import Counter
from typing import Dict, Sequence
from ....database import db
from ..article.models.article import ArticleNumber
from .models.order_item import OrderItem
from .models.payment import PaymentState
def count_ordered_articles(article_number: ArticleNumber
) -> Dict[PaymentState, int]:
"""Count how often the article has been ordered, grouped by the
order's payment state.
"""
order_items = OrderItem.query \
.filter_by(article_number=article_number) \
.options(
db.joinedload('order'),
db.joinedload('article'),
) \
.all()
# Ensure every payment state is present in the resulting dictionary,
# even if no orders of the corresponding payment state exist for the
# article.
counter = Counter({state: 0 for state in PaymentState})
for order_item in order_items:
counter[order_item.order.payment_state] += order_item.quantity
return dict(counter)
def get_order_items_for_article(article_number: ArticleNumber
) -> Sequence[OrderItem]:
"""Return all order items for that article."""
return OrderItem.query \
.filter_by(article_number=article_number) \
.all()
Return `OrderItemTuple`s rather than order item database entities from service
|
"""
byceps.services.shop.order.ordered_articles_service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from collections import Counter
from typing import Dict, Sequence
from ....database import db
from ..article.models.article import ArticleNumber
from .models.order_item import OrderItem, OrderItemTuple
from .models.payment import PaymentState
def count_ordered_articles(article_number: ArticleNumber
) -> Dict[PaymentState, int]:
"""Count how often the article has been ordered, grouped by the
order's payment state.
"""
order_items = OrderItem.query \
.filter_by(article_number=article_number) \
.options(
db.joinedload('order'),
db.joinedload('article'),
) \
.all()
# Ensure every payment state is present in the resulting dictionary,
# even if no orders of the corresponding payment state exist for the
# article.
counter = Counter({state: 0 for state in PaymentState})
for order_item in order_items:
counter[order_item.order.payment_state] += order_item.quantity
return dict(counter)
def get_order_items_for_article(article_number: ArticleNumber
) -> Sequence[OrderItemTuple]:
"""Return all order items for that article."""
order_items = OrderItem.query \
.filter_by(article_number=article_number) \
.all()
return [item.to_tuple() for item in order_items]
|
<commit_before>"""
byceps.services.shop.order.ordered_articles_service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from collections import Counter
from typing import Dict, Sequence
from ....database import db
from ..article.models.article import ArticleNumber
from .models.order_item import OrderItem
from .models.payment import PaymentState
def count_ordered_articles(article_number: ArticleNumber
) -> Dict[PaymentState, int]:
"""Count how often the article has been ordered, grouped by the
order's payment state.
"""
order_items = OrderItem.query \
.filter_by(article_number=article_number) \
.options(
db.joinedload('order'),
db.joinedload('article'),
) \
.all()
# Ensure every payment state is present in the resulting dictionary,
# even if no orders of the corresponding payment state exist for the
# article.
counter = Counter({state: 0 for state in PaymentState})
for order_item in order_items:
counter[order_item.order.payment_state] += order_item.quantity
return dict(counter)
def get_order_items_for_article(article_number: ArticleNumber
) -> Sequence[OrderItem]:
"""Return all order items for that article."""
return OrderItem.query \
.filter_by(article_number=article_number) \
.all()
<commit_msg>Return `OrderItemTuple`s rather than order item database entities from service<commit_after>
|
"""
byceps.services.shop.order.ordered_articles_service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from collections import Counter
from typing import Dict, Sequence
from ....database import db
from ..article.models.article import ArticleNumber
from .models.order_item import OrderItem, OrderItemTuple
from .models.payment import PaymentState
def count_ordered_articles(article_number: ArticleNumber
) -> Dict[PaymentState, int]:
"""Count how often the article has been ordered, grouped by the
order's payment state.
"""
order_items = OrderItem.query \
.filter_by(article_number=article_number) \
.options(
db.joinedload('order'),
db.joinedload('article'),
) \
.all()
# Ensure every payment state is present in the resulting dictionary,
# even if no orders of the corresponding payment state exist for the
# article.
counter = Counter({state: 0 for state in PaymentState})
for order_item in order_items:
counter[order_item.order.payment_state] += order_item.quantity
return dict(counter)
def get_order_items_for_article(article_number: ArticleNumber
) -> Sequence[OrderItemTuple]:
"""Return all order items for that article."""
order_items = OrderItem.query \
.filter_by(article_number=article_number) \
.all()
return [item.to_tuple() for item in order_items]
|
"""
byceps.services.shop.order.ordered_articles_service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from collections import Counter
from typing import Dict, Sequence
from ....database import db
from ..article.models.article import ArticleNumber
from .models.order_item import OrderItem
from .models.payment import PaymentState
def count_ordered_articles(article_number: ArticleNumber
) -> Dict[PaymentState, int]:
"""Count how often the article has been ordered, grouped by the
order's payment state.
"""
order_items = OrderItem.query \
.filter_by(article_number=article_number) \
.options(
db.joinedload('order'),
db.joinedload('article'),
) \
.all()
# Ensure every payment state is present in the resulting dictionary,
# even if no orders of the corresponding payment state exist for the
# article.
counter = Counter({state: 0 for state in PaymentState})
for order_item in order_items:
counter[order_item.order.payment_state] += order_item.quantity
return dict(counter)
def get_order_items_for_article(article_number: ArticleNumber
) -> Sequence[OrderItem]:
"""Return all order items for that article."""
return OrderItem.query \
.filter_by(article_number=article_number) \
.all()
Return `OrderItemTuple`s rather than order item database entities from service"""
byceps.services.shop.order.ordered_articles_service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from collections import Counter
from typing import Dict, Sequence
from ....database import db
from ..article.models.article import ArticleNumber
from .models.order_item import OrderItem, OrderItemTuple
from .models.payment import PaymentState
def count_ordered_articles(article_number: ArticleNumber
) -> Dict[PaymentState, int]:
"""Count how often the article has been ordered, grouped by the
order's payment state.
"""
order_items = OrderItem.query \
.filter_by(article_number=article_number) \
.options(
db.joinedload('order'),
db.joinedload('article'),
) \
.all()
# Ensure every payment state is present in the resulting dictionary,
# even if no orders of the corresponding payment state exist for the
# article.
counter = Counter({state: 0 for state in PaymentState})
for order_item in order_items:
counter[order_item.order.payment_state] += order_item.quantity
return dict(counter)
def get_order_items_for_article(article_number: ArticleNumber
) -> Sequence[OrderItemTuple]:
"""Return all order items for that article."""
order_items = OrderItem.query \
.filter_by(article_number=article_number) \
.all()
return [item.to_tuple() for item in order_items]
|
<commit_before>"""
byceps.services.shop.order.ordered_articles_service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from collections import Counter
from typing import Dict, Sequence
from ....database import db
from ..article.models.article import ArticleNumber
from .models.order_item import OrderItem
from .models.payment import PaymentState
def count_ordered_articles(article_number: ArticleNumber
) -> Dict[PaymentState, int]:
"""Count how often the article has been ordered, grouped by the
order's payment state.
"""
order_items = OrderItem.query \
.filter_by(article_number=article_number) \
.options(
db.joinedload('order'),
db.joinedload('article'),
) \
.all()
# Ensure every payment state is present in the resulting dictionary,
# even if no orders of the corresponding payment state exist for the
# article.
counter = Counter({state: 0 for state in PaymentState})
for order_item in order_items:
counter[order_item.order.payment_state] += order_item.quantity
return dict(counter)
def get_order_items_for_article(article_number: ArticleNumber
) -> Sequence[OrderItem]:
"""Return all order items for that article."""
return OrderItem.query \
.filter_by(article_number=article_number) \
.all()
<commit_msg>Return `OrderItemTuple`s rather than order item database entities from service<commit_after>"""
byceps.services.shop.order.ordered_articles_service
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from collections import Counter
from typing import Dict, Sequence
from ....database import db
from ..article.models.article import ArticleNumber
from .models.order_item import OrderItem, OrderItemTuple
from .models.payment import PaymentState
def count_ordered_articles(article_number: ArticleNumber
) -> Dict[PaymentState, int]:
"""Count how often the article has been ordered, grouped by the
order's payment state.
"""
order_items = OrderItem.query \
.filter_by(article_number=article_number) \
.options(
db.joinedload('order'),
db.joinedload('article'),
) \
.all()
# Ensure every payment state is present in the resulting dictionary,
# even if no orders of the corresponding payment state exist for the
# article.
counter = Counter({state: 0 for state in PaymentState})
for order_item in order_items:
counter[order_item.order.payment_state] += order_item.quantity
return dict(counter)
def get_order_items_for_article(article_number: ArticleNumber
) -> Sequence[OrderItemTuple]:
"""Return all order items for that article."""
order_items = OrderItem.query \
.filter_by(article_number=article_number) \
.all()
return [item.to_tuple() for item in order_items]
|
e77af0365a977191f00250887e8f8945ab460b5a
|
backend/breach/migrations/0015_auto_20160511_1740.py
|
backend/breach/migrations/0015_auto_20160511_1740.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-05-11 17:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('breach', '0014_auto_20160504_1753'),
]
operations = [
migrations.AddField(
model_name='victim',
name='realtimeurl',
field=models.CharField(default='http://localhost:3031', help_text="The realtime module URL that the client should communicate with. This URL must include the 'http://' prefix.", max_length=255),
),
migrations.AlterField(
model_name='victim',
name='method',
field=models.IntegerField(choices=[(1, 'serial'), (2, 'divide&conquer')], default=1, help_text='Method of building candidate samplesets.'),
),
]
|
Add migration for methods, realtimeurl
|
Add migration for methods, realtimeurl
|
Python
|
mit
|
dimkarakostas/rupture,dionyziz/rupture,dimriou/rupture,dimkarakostas/rupture,dionyziz/rupture,dionyziz/rupture,esarafianou/rupture,dimkarakostas/rupture,dimkarakostas/rupture,dimriou/rupture,dionyziz/rupture,esarafianou/rupture,esarafianou/rupture,dionyziz/rupture,dimriou/rupture,dimriou/rupture,dimkarakostas/rupture,esarafianou/rupture,dimriou/rupture
|
Add migration for methods, realtimeurl
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-05-11 17:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('breach', '0014_auto_20160504_1753'),
]
operations = [
migrations.AddField(
model_name='victim',
name='realtimeurl',
field=models.CharField(default='http://localhost:3031', help_text="The realtime module URL that the client should communicate with. This URL must include the 'http://' prefix.", max_length=255),
),
migrations.AlterField(
model_name='victim',
name='method',
field=models.IntegerField(choices=[(1, 'serial'), (2, 'divide&conquer')], default=1, help_text='Method of building candidate samplesets.'),
),
]
|
<commit_before><commit_msg>Add migration for methods, realtimeurl<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-05-11 17:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('breach', '0014_auto_20160504_1753'),
]
operations = [
migrations.AddField(
model_name='victim',
name='realtimeurl',
field=models.CharField(default='http://localhost:3031', help_text="The realtime module URL that the client should communicate with. This URL must include the 'http://' prefix.", max_length=255),
),
migrations.AlterField(
model_name='victim',
name='method',
field=models.IntegerField(choices=[(1, 'serial'), (2, 'divide&conquer')], default=1, help_text='Method of building candidate samplesets.'),
),
]
|
Add migration for methods, realtimeurl# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-05-11 17:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('breach', '0014_auto_20160504_1753'),
]
operations = [
migrations.AddField(
model_name='victim',
name='realtimeurl',
field=models.CharField(default='http://localhost:3031', help_text="The realtime module URL that the client should communicate with. This URL must include the 'http://' prefix.", max_length=255),
),
migrations.AlterField(
model_name='victim',
name='method',
field=models.IntegerField(choices=[(1, 'serial'), (2, 'divide&conquer')], default=1, help_text='Method of building candidate samplesets.'),
),
]
|
<commit_before><commit_msg>Add migration for methods, realtimeurl<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-05-11 17:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('breach', '0014_auto_20160504_1753'),
]
operations = [
migrations.AddField(
model_name='victim',
name='realtimeurl',
field=models.CharField(default='http://localhost:3031', help_text="The realtime module URL that the client should communicate with. This URL must include the 'http://' prefix.", max_length=255),
),
migrations.AlterField(
model_name='victim',
name='method',
field=models.IntegerField(choices=[(1, 'serial'), (2, 'divide&conquer')], default=1, help_text='Method of building candidate samplesets.'),
),
]
|
|
474c2ec9fcea87e4020b3ee5f161d938ce2b558e
|
nurseconnect/models.py
|
nurseconnect/models.py
|
from django.db import models
from molo.profiles.models import UserProfile as MoloUserProfile
class UserProfile(MoloUserProfile):
"""
NurseConnect requires a few modifications to the standard way Profiles are
handled by molo.profiles. This model serves to implement these.
"""
clinic_code = models.CharField(
min_length=6,
max_length=6,
blank=True,
null=True)
|
Create UserProfile model for NurseConnect
|
Create UserProfile model for NurseConnect
|
Python
|
bsd-2-clause
|
praekelt/nurseconnect,praekelt/nurseconnect,praekelt/nurseconnect
|
Create UserProfile model for NurseConnect
|
from django.db import models
from molo.profiles.models import UserProfile as MoloUserProfile
class UserProfile(MoloUserProfile):
"""
NurseConnect requires a few modifications to the standard way Profiles are
handled by molo.profiles. This model serves to implement these.
"""
clinic_code = models.CharField(
min_length=6,
max_length=6,
blank=True,
null=True)
|
<commit_before><commit_msg>Create UserProfile model for NurseConnect<commit_after>
|
from django.db import models
from molo.profiles.models import UserProfile as MoloUserProfile
class UserProfile(MoloUserProfile):
"""
NurseConnect requires a few modifications to the standard way Profiles are
handled by molo.profiles. This model serves to implement these.
"""
clinic_code = models.CharField(
min_length=6,
max_length=6,
blank=True,
null=True)
|
Create UserProfile model for NurseConnectfrom django.db import models
from molo.profiles.models import UserProfile as MoloUserProfile
class UserProfile(MoloUserProfile):
"""
NurseConnect requires a few modifications to the standard way Profiles are
handled by molo.profiles. This model serves to implement these.
"""
clinic_code = models.CharField(
min_length=6,
max_length=6,
blank=True,
null=True)
|
<commit_before><commit_msg>Create UserProfile model for NurseConnect<commit_after>from django.db import models
from molo.profiles.models import UserProfile as MoloUserProfile
class UserProfile(MoloUserProfile):
"""
NurseConnect requires a few modifications to the standard way Profiles are
handled by molo.profiles. This model serves to implement these.
"""
clinic_code = models.CharField(
min_length=6,
max_length=6,
blank=True,
null=True)
|
|
119bcb37d04e80f49bbb8bcf44b6ea4a937388b8
|
dbaas/workflow/steps/util/deploy/restart_td_agent.py
|
dbaas/workflow/steps/util/deploy/restart_td_agent.py
|
# -*- coding: utf-8 -*-
import logging
from util import full_stack
from util import exec_remote_command
from dbaas_cloudstack.models import HostAttr as CS_HostAttr
from workflow.steps.util.base import BaseStep
from workflow.steps.util import test_bash_script_error
from workflow.steps.util import td_agent_script
from workflow.exceptions.error_codes import DBAAS_0002
from time import sleep
LOG = logging.getLogger(__name__)
class ReStartTdAgent(BaseStep):
def __unicode__(self):
return "Restarting td-agent..."
def do(self, workflow_dict):
try:
option = 'restart'
for host in workflow_dict['hosts']:
LOG.info("{} monit on host {}".format(option, host))
cs_host_attr = CS_HostAttr.objects.get(host=host)
script = test_bash_script_error()
script += td_agent_script(option)
LOG.info(script)
output = {}
sleep(30)
return_code = exec_remote_command(server=host.address,
username=cs_host_attr.vm_user,
password=cs_host_attr.vm_password,
command=script,
output=output)
LOG.info(output)
if return_code != 0:
LOG.error("Error monit")
LOG.error(str(output))
return True
except Exception:
traceback = full_stack()
workflow_dict['exceptions']['error_codes'].append(DBAAS_0002)
workflow_dict['exceptions']['traceback'].append(traceback)
return False
def undo(self, workflow_dict):
LOG.info("Running undo...")
return True
|
Add td agent restart step
|
Add td agent restart step
|
Python
|
bsd-3-clause
|
globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service
|
Add td agent restart step
|
# -*- coding: utf-8 -*-
import logging
from util import full_stack
from util import exec_remote_command
from dbaas_cloudstack.models import HostAttr as CS_HostAttr
from workflow.steps.util.base import BaseStep
from workflow.steps.util import test_bash_script_error
from workflow.steps.util import td_agent_script
from workflow.exceptions.error_codes import DBAAS_0002
from time import sleep
LOG = logging.getLogger(__name__)
class ReStartTdAgent(BaseStep):
def __unicode__(self):
return "Restarting td-agent..."
def do(self, workflow_dict):
try:
option = 'restart'
for host in workflow_dict['hosts']:
LOG.info("{} monit on host {}".format(option, host))
cs_host_attr = CS_HostAttr.objects.get(host=host)
script = test_bash_script_error()
script += td_agent_script(option)
LOG.info(script)
output = {}
sleep(30)
return_code = exec_remote_command(server=host.address,
username=cs_host_attr.vm_user,
password=cs_host_attr.vm_password,
command=script,
output=output)
LOG.info(output)
if return_code != 0:
LOG.error("Error monit")
LOG.error(str(output))
return True
except Exception:
traceback = full_stack()
workflow_dict['exceptions']['error_codes'].append(DBAAS_0002)
workflow_dict['exceptions']['traceback'].append(traceback)
return False
def undo(self, workflow_dict):
LOG.info("Running undo...")
return True
|
<commit_before><commit_msg>Add td agent restart step<commit_after>
|
# -*- coding: utf-8 -*-
import logging
from util import full_stack
from util import exec_remote_command
from dbaas_cloudstack.models import HostAttr as CS_HostAttr
from workflow.steps.util.base import BaseStep
from workflow.steps.util import test_bash_script_error
from workflow.steps.util import td_agent_script
from workflow.exceptions.error_codes import DBAAS_0002
from time import sleep
LOG = logging.getLogger(__name__)
class ReStartTdAgent(BaseStep):
def __unicode__(self):
return "Restarting td-agent..."
def do(self, workflow_dict):
try:
option = 'restart'
for host in workflow_dict['hosts']:
LOG.info("{} monit on host {}".format(option, host))
cs_host_attr = CS_HostAttr.objects.get(host=host)
script = test_bash_script_error()
script += td_agent_script(option)
LOG.info(script)
output = {}
sleep(30)
return_code = exec_remote_command(server=host.address,
username=cs_host_attr.vm_user,
password=cs_host_attr.vm_password,
command=script,
output=output)
LOG.info(output)
if return_code != 0:
LOG.error("Error monit")
LOG.error(str(output))
return True
except Exception:
traceback = full_stack()
workflow_dict['exceptions']['error_codes'].append(DBAAS_0002)
workflow_dict['exceptions']['traceback'].append(traceback)
return False
def undo(self, workflow_dict):
LOG.info("Running undo...")
return True
|
Add td agent restart step# -*- coding: utf-8 -*-
import logging
from util import full_stack
from util import exec_remote_command
from dbaas_cloudstack.models import HostAttr as CS_HostAttr
from workflow.steps.util.base import BaseStep
from workflow.steps.util import test_bash_script_error
from workflow.steps.util import td_agent_script
from workflow.exceptions.error_codes import DBAAS_0002
from time import sleep
LOG = logging.getLogger(__name__)
class ReStartTdAgent(BaseStep):
def __unicode__(self):
return "Restarting td-agent..."
def do(self, workflow_dict):
try:
option = 'restart'
for host in workflow_dict['hosts']:
LOG.info("{} monit on host {}".format(option, host))
cs_host_attr = CS_HostAttr.objects.get(host=host)
script = test_bash_script_error()
script += td_agent_script(option)
LOG.info(script)
output = {}
sleep(30)
return_code = exec_remote_command(server=host.address,
username=cs_host_attr.vm_user,
password=cs_host_attr.vm_password,
command=script,
output=output)
LOG.info(output)
if return_code != 0:
LOG.error("Error monit")
LOG.error(str(output))
return True
except Exception:
traceback = full_stack()
workflow_dict['exceptions']['error_codes'].append(DBAAS_0002)
workflow_dict['exceptions']['traceback'].append(traceback)
return False
def undo(self, workflow_dict):
LOG.info("Running undo...")
return True
|
<commit_before><commit_msg>Add td agent restart step<commit_after># -*- coding: utf-8 -*-
import logging
from util import full_stack
from util import exec_remote_command
from dbaas_cloudstack.models import HostAttr as CS_HostAttr
from workflow.steps.util.base import BaseStep
from workflow.steps.util import test_bash_script_error
from workflow.steps.util import td_agent_script
from workflow.exceptions.error_codes import DBAAS_0002
from time import sleep
LOG = logging.getLogger(__name__)
class ReStartTdAgent(BaseStep):
def __unicode__(self):
return "Restarting td-agent..."
def do(self, workflow_dict):
try:
option = 'restart'
for host in workflow_dict['hosts']:
LOG.info("{} monit on host {}".format(option, host))
cs_host_attr = CS_HostAttr.objects.get(host=host)
script = test_bash_script_error()
script += td_agent_script(option)
LOG.info(script)
output = {}
sleep(30)
return_code = exec_remote_command(server=host.address,
username=cs_host_attr.vm_user,
password=cs_host_attr.vm_password,
command=script,
output=output)
LOG.info(output)
if return_code != 0:
LOG.error("Error monit")
LOG.error(str(output))
return True
except Exception:
traceback = full_stack()
workflow_dict['exceptions']['error_codes'].append(DBAAS_0002)
workflow_dict['exceptions']['traceback'].append(traceback)
return False
def undo(self, workflow_dict):
LOG.info("Running undo...")
return True
|
|
ca7b91c3fea848c050b7d1e9099327e4f8ffcc31
|
Collapse_Capture_Intervals.py
|
Collapse_Capture_Intervals.py
|
#!/usr/bin/env python
"""Collapse smaller consecutive intervals into larger ones. Allows a minimum
distance intervals are spaced for them to be consecutive."""
import sys
intervals = sys.argv[1]
def collapse_intervals(regions, dist, minsize=150):
"""Takes a list of tuples describing intervals, and collapses consecutive
intervals into larger ones. If two intervals are 'dist' or fewer bp apart,
they are considered to be in the same region. If an interval falls below a
certain size, it is rejected."""
new_reg = []
# First collapse consecutive intervals
tmp_reg = ()
for r in regions:
if len(tmp_reg) == 0:
tmp_reg += r
else:
if int(r[0]) - int(tmp_reg[-1]) > dist:
new_reg.append((tmp_reg[0], tmp_reg[-1]))
tmp_reg = ()
else:
tmp_reg += r
# Then filter by size
new_reg_flt = [i for i in new_reg if int(i[1]) - int(i[0]) >= minsize]
return(new_reg_flt)
chroms = {}
with open(sys.argv[1], 'r') as f:
for line in f:
tmp = line.strip().split()
if tmp[0] not in chroms:
chroms[tmp[0]] = []
chroms[tmp[0]].append((tmp[1], tmp[2]))
for c in sorted(chroms.keys()):
captured = collapse_intervals(chroms[c], 50)
for reg in captured:
print '\t'.join([c, reg[0], reg[1]])
|
Add script to generate BED intervals from filtered coverage data
|
Add script to generate BED intervals from filtered coverage data
|
Python
|
unlicense
|
TomJKono/Misc_Utils,TomJKono/Misc_Utils,TomJKono/Misc_Utils
|
Add script to generate BED intervals from filtered coverage data
|
#!/usr/bin/env python
"""Collapse smaller consecutive intervals into larger ones. Allows a minimum
distance intervals are spaced for them to be consecutive."""
import sys
intervals = sys.argv[1]
def collapse_intervals(regions, dist, minsize=150):
"""Takes a list of tuples describing intervals, and collapses consecutive
intervals into larger ones. If two intervals are 'dist' or fewer bp apart,
they are considered to be in the same region. If an interval falls below a
certain size, it is rejected."""
new_reg = []
# First collapse consecutive intervals
tmp_reg = ()
for r in regions:
if len(tmp_reg) == 0:
tmp_reg += r
else:
if int(r[0]) - int(tmp_reg[-1]) > dist:
new_reg.append((tmp_reg[0], tmp_reg[-1]))
tmp_reg = ()
else:
tmp_reg += r
# Then filter by size
new_reg_flt = [i for i in new_reg if int(i[1]) - int(i[0]) >= minsize]
return(new_reg_flt)
chroms = {}
with open(sys.argv[1], 'r') as f:
for line in f:
tmp = line.strip().split()
if tmp[0] not in chroms:
chroms[tmp[0]] = []
chroms[tmp[0]].append((tmp[1], tmp[2]))
for c in sorted(chroms.keys()):
captured = collapse_intervals(chroms[c], 50)
for reg in captured:
print '\t'.join([c, reg[0], reg[1]])
|
<commit_before><commit_msg>Add script to generate BED intervals from filtered coverage data<commit_after>
|
#!/usr/bin/env python
"""Collapse smaller consecutive intervals into larger ones. Allows a minimum
distance intervals are spaced for them to be consecutive."""
import sys
intervals = sys.argv[1]
def collapse_intervals(regions, dist, minsize=150):
"""Takes a list of tuples describing intervals, and collapses consecutive
intervals into larger ones. If two intervals are 'dist' or fewer bp apart,
they are considered to be in the same region. If an interval falls below a
certain size, it is rejected."""
new_reg = []
# First collapse consecutive intervals
tmp_reg = ()
for r in regions:
if len(tmp_reg) == 0:
tmp_reg += r
else:
if int(r[0]) - int(tmp_reg[-1]) > dist:
new_reg.append((tmp_reg[0], tmp_reg[-1]))
tmp_reg = ()
else:
tmp_reg += r
# Then filter by size
new_reg_flt = [i for i in new_reg if int(i[1]) - int(i[0]) >= minsize]
return(new_reg_flt)
chroms = {}
with open(sys.argv[1], 'r') as f:
for line in f:
tmp = line.strip().split()
if tmp[0] not in chroms:
chroms[tmp[0]] = []
chroms[tmp[0]].append((tmp[1], tmp[2]))
for c in sorted(chroms.keys()):
captured = collapse_intervals(chroms[c], 50)
for reg in captured:
print '\t'.join([c, reg[0], reg[1]])
|
Add script to generate BED intervals from filtered coverage data#!/usr/bin/env python
"""Collapse smaller consecutive intervals into larger ones. Allows a minimum
distance intervals are spaced for them to be consecutive."""
import sys
intervals = sys.argv[1]
def collapse_intervals(regions, dist, minsize=150):
"""Takes a list of tuples describing intervals, and collapses consecutive
intervals into larger ones. If two intervals are 'dist' or fewer bp apart,
they are considered to be in the same region. If an interval falls below a
certain size, it is rejected."""
new_reg = []
# First collapse consecutive intervals
tmp_reg = ()
for r in regions:
if len(tmp_reg) == 0:
tmp_reg += r
else:
if int(r[0]) - int(tmp_reg[-1]) > dist:
new_reg.append((tmp_reg[0], tmp_reg[-1]))
tmp_reg = ()
else:
tmp_reg += r
# Then filter by size
new_reg_flt = [i for i in new_reg if int(i[1]) - int(i[0]) >= minsize]
return(new_reg_flt)
chroms = {}
with open(sys.argv[1], 'r') as f:
for line in f:
tmp = line.strip().split()
if tmp[0] not in chroms:
chroms[tmp[0]] = []
chroms[tmp[0]].append((tmp[1], tmp[2]))
for c in sorted(chroms.keys()):
captured = collapse_intervals(chroms[c], 50)
for reg in captured:
print '\t'.join([c, reg[0], reg[1]])
|
<commit_before><commit_msg>Add script to generate BED intervals from filtered coverage data<commit_after>#!/usr/bin/env python
"""Collapse smaller consecutive intervals into larger ones. Allows a minimum
distance intervals are spaced for them to be consecutive."""
import sys
intervals = sys.argv[1]
def collapse_intervals(regions, dist, minsize=150):
"""Takes a list of tuples describing intervals, and collapses consecutive
intervals into larger ones. If two intervals are 'dist' or fewer bp apart,
they are considered to be in the same region. If an interval falls below a
certain size, it is rejected."""
new_reg = []
# First collapse consecutive intervals
tmp_reg = ()
for r in regions:
if len(tmp_reg) == 0:
tmp_reg += r
else:
if int(r[0]) - int(tmp_reg[-1]) > dist:
new_reg.append((tmp_reg[0], tmp_reg[-1]))
tmp_reg = ()
else:
tmp_reg += r
# Then filter by size
new_reg_flt = [i for i in new_reg if int(i[1]) - int(i[0]) >= minsize]
return(new_reg_flt)
chroms = {}
with open(sys.argv[1], 'r') as f:
for line in f:
tmp = line.strip().split()
if tmp[0] not in chroms:
chroms[tmp[0]] = []
chroms[tmp[0]].append((tmp[1], tmp[2]))
for c in sorted(chroms.keys()):
captured = collapse_intervals(chroms[c], 50)
for reg in captured:
print '\t'.join([c, reg[0], reg[1]])
|
|
6edd1b4856956436a9a6844a29856681e1df6248
|
leetcode/287-Find-the-Duplicate-Number/FindtheDupNum_001.py
|
leetcode/287-Find-the-Duplicate-Number/FindtheDupNum_001.py
|
class Solution(object):
def findDuplicate(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
l, r = 1, len(nums) - 1
while l < r:
m = l + (r - l) / 2
cnt = 0
for num in nums:
if num <= m:
cnt += 1
print l, r, m
if cnt > m:
r = m
else:
l = m + 1
return l
|
Add the bin search py version of FindtheDupNum
|
Add the bin search py version of FindtheDupNum
|
Python
|
mit
|
Chasego/codirit,cc13ny/algo,cc13ny/Allin,Chasego/codirit,Chasego/cod,Chasego/codirit,cc13ny/algo,Chasego/cod,cc13ny/Allin,Chasego/codirit,Chasego/codi,Chasego/cod,Chasego/codi,Chasego/cod,cc13ny/Allin,Chasego/codi,Chasego/codi,cc13ny/algo,Chasego/codi,cc13ny/algo,Chasego/codirit,Chasego/cod,cc13ny/Allin,cc13ny/algo,cc13ny/Allin
|
Add the bin search py version of FindtheDupNum
|
class Solution(object):
def findDuplicate(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
l, r = 1, len(nums) - 1
while l < r:
m = l + (r - l) / 2
cnt = 0
for num in nums:
if num <= m:
cnt += 1
print l, r, m
if cnt > m:
r = m
else:
l = m + 1
return l
|
<commit_before><commit_msg>Add the bin search py version of FindtheDupNum<commit_after>
|
class Solution(object):
def findDuplicate(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
l, r = 1, len(nums) - 1
while l < r:
m = l + (r - l) / 2
cnt = 0
for num in nums:
if num <= m:
cnt += 1
print l, r, m
if cnt > m:
r = m
else:
l = m + 1
return l
|
Add the bin search py version of FindtheDupNumclass Solution(object):
def findDuplicate(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
l, r = 1, len(nums) - 1
while l < r:
m = l + (r - l) / 2
cnt = 0
for num in nums:
if num <= m:
cnt += 1
print l, r, m
if cnt > m:
r = m
else:
l = m + 1
return l
|
<commit_before><commit_msg>Add the bin search py version of FindtheDupNum<commit_after>class Solution(object):
def findDuplicate(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
l, r = 1, len(nums) - 1
while l < r:
m = l + (r - l) / 2
cnt = 0
for num in nums:
if num <= m:
cnt += 1
print l, r, m
if cnt > m:
r = m
else:
l = m + 1
return l
|
|
adc87e9c16e453acd6f5dd74ea4ec3b61458bb73
|
cinch/tests/lexer_tests.py
|
cinch/tests/lexer_tests.py
|
from unittest import TestCase
from cinch.lexer import lex
class TestLexer(TestCase):
def test_lex(self):
# noqa -- this is formatted like real code
source = """
if ( 4 ) {
avariable = 1 + 1
anothervariable = a - 314
}
"""
# noqa -- this is formatted like real code
expected_lexing = ['if', '(', '4', ')', '{', # noqa
'avariable', '=', '1', '+', '1', # noqa
'anothervariable', '=', 'a', '-', '314', # noqa
'}']
result = lex(source)
self.assertEqual(result, expected_lexing)
# noqa -- this is formatted like real code
source = """
function afunction ( a b c ) {
avariable = 1 + 1
anothervariable = a - 314
}
"""
# noqa -- this is formatted like real code
expected_lexing = ['function', 'afunction', '(', 'a', 'b', 'c', ')',
'{', # noqa
'avariable', '=', '1', '+', '1', # noqa
'anothervariable', '=', 'a', '-', '314', # noqa
'}']
result = lex(source)
self.assertEqual(result, expected_lexing)
|
Add tests for the lexer
|
Add tests for the lexer
|
Python
|
mit
|
tschuy/cinch-lang,iankronquist/cinch-lang
|
Add tests for the lexer
|
from unittest import TestCase
from cinch.lexer import lex
class TestLexer(TestCase):
def test_lex(self):
# noqa -- this is formatted like real code
source = """
if ( 4 ) {
avariable = 1 + 1
anothervariable = a - 314
}
"""
# noqa -- this is formatted like real code
expected_lexing = ['if', '(', '4', ')', '{', # noqa
'avariable', '=', '1', '+', '1', # noqa
'anothervariable', '=', 'a', '-', '314', # noqa
'}']
result = lex(source)
self.assertEqual(result, expected_lexing)
# noqa -- this is formatted like real code
source = """
function afunction ( a b c ) {
avariable = 1 + 1
anothervariable = a - 314
}
"""
# noqa -- this is formatted like real code
expected_lexing = ['function', 'afunction', '(', 'a', 'b', 'c', ')',
'{', # noqa
'avariable', '=', '1', '+', '1', # noqa
'anothervariable', '=', 'a', '-', '314', # noqa
'}']
result = lex(source)
self.assertEqual(result, expected_lexing)
|
<commit_before><commit_msg>Add tests for the lexer<commit_after>
|
from unittest import TestCase
from cinch.lexer import lex
class TestLexer(TestCase):
def test_lex(self):
# noqa -- this is formatted like real code
source = """
if ( 4 ) {
avariable = 1 + 1
anothervariable = a - 314
}
"""
# noqa -- this is formatted like real code
expected_lexing = ['if', '(', '4', ')', '{', # noqa
'avariable', '=', '1', '+', '1', # noqa
'anothervariable', '=', 'a', '-', '314', # noqa
'}']
result = lex(source)
self.assertEqual(result, expected_lexing)
# noqa -- this is formatted like real code
source = """
function afunction ( a b c ) {
avariable = 1 + 1
anothervariable = a - 314
}
"""
# noqa -- this is formatted like real code
expected_lexing = ['function', 'afunction', '(', 'a', 'b', 'c', ')',
'{', # noqa
'avariable', '=', '1', '+', '1', # noqa
'anothervariable', '=', 'a', '-', '314', # noqa
'}']
result = lex(source)
self.assertEqual(result, expected_lexing)
|
Add tests for the lexerfrom unittest import TestCase
from cinch.lexer import lex
class TestLexer(TestCase):
def test_lex(self):
# noqa -- this is formatted like real code
source = """
if ( 4 ) {
avariable = 1 + 1
anothervariable = a - 314
}
"""
# noqa -- this is formatted like real code
expected_lexing = ['if', '(', '4', ')', '{', # noqa
'avariable', '=', '1', '+', '1', # noqa
'anothervariable', '=', 'a', '-', '314', # noqa
'}']
result = lex(source)
self.assertEqual(result, expected_lexing)
# noqa -- this is formatted like real code
source = """
function afunction ( a b c ) {
avariable = 1 + 1
anothervariable = a - 314
}
"""
# noqa -- this is formatted like real code
expected_lexing = ['function', 'afunction', '(', 'a', 'b', 'c', ')',
'{', # noqa
'avariable', '=', '1', '+', '1', # noqa
'anothervariable', '=', 'a', '-', '314', # noqa
'}']
result = lex(source)
self.assertEqual(result, expected_lexing)
|
<commit_before><commit_msg>Add tests for the lexer<commit_after>from unittest import TestCase
from cinch.lexer import lex
class TestLexer(TestCase):
def test_lex(self):
# noqa -- this is formatted like real code
source = """
if ( 4 ) {
avariable = 1 + 1
anothervariable = a - 314
}
"""
# noqa -- this is formatted like real code
expected_lexing = ['if', '(', '4', ')', '{', # noqa
'avariable', '=', '1', '+', '1', # noqa
'anothervariable', '=', 'a', '-', '314', # noqa
'}']
result = lex(source)
self.assertEqual(result, expected_lexing)
# noqa -- this is formatted like real code
source = """
function afunction ( a b c ) {
avariable = 1 + 1
anothervariable = a - 314
}
"""
# noqa -- this is formatted like real code
expected_lexing = ['function', 'afunction', '(', 'a', 'b', 'c', ')',
'{', # noqa
'avariable', '=', '1', '+', '1', # noqa
'anothervariable', '=', 'a', '-', '314', # noqa
'}']
result = lex(source)
self.assertEqual(result, expected_lexing)
|
|
258a244ad088f1f827e41f091c6fbc51ad44f27b
|
src/visualize_model.py
|
src/visualize_model.py
|
import argparse
from keras.utils import plot_model
from prep import prep_data
from model import get_model
ap = argparse.ArgumentParser()
ap.add_argument('--dataset', type=str, default='./data')
ap.add_argument('--x_length', type=int, default=15)
ap.add_argument('--y_length', type=int, default=20)
ap.add_argument('--hidden_size', type=int, default=128)
ap.add_argument('--num_layers', type=int, default=2)
ap.add_argument('--show_shapes', type=bool, default=False)
args = vars(ap.parse_args())
DATASET_PATH = args['dataset']
INPUT_LENGTH = args['x_length']
OUTPUT_LENGTH = args['y_length']
HIDDEN_SIZE = args['hidden_size']
NUM_LAYERS = args['num_layers']
SHOW_SHAPES = args['show_shapes']
_, _, input_vocab, output_vocab, _, _ = prep_data(DATASET_PATH, INPUT_LENGTH, OUTPUT_LENGTH)
auto_encoder = get_model(len(input_vocab),
INPUT_LENGTH,
len(output_vocab),
OUTPUT_LENGTH,
HIDDEN_SIZE,
NUM_LAYERS)
plot_model(auto_encoder, to_file='model.png', show_shapes=SHOW_SHAPES)
|
Add script for visualizing model
|
Add script for visualizing model
|
Python
|
mit
|
vdragan1993/python-coder
|
Add script for visualizing model
|
import argparse
from keras.utils import plot_model
from prep import prep_data
from model import get_model
ap = argparse.ArgumentParser()
ap.add_argument('--dataset', type=str, default='./data')
ap.add_argument('--x_length', type=int, default=15)
ap.add_argument('--y_length', type=int, default=20)
ap.add_argument('--hidden_size', type=int, default=128)
ap.add_argument('--num_layers', type=int, default=2)
ap.add_argument('--show_shapes', type=bool, default=False)
args = vars(ap.parse_args())
DATASET_PATH = args['dataset']
INPUT_LENGTH = args['x_length']
OUTPUT_LENGTH = args['y_length']
HIDDEN_SIZE = args['hidden_size']
NUM_LAYERS = args['num_layers']
SHOW_SHAPES = args['show_shapes']
_, _, input_vocab, output_vocab, _, _ = prep_data(DATASET_PATH, INPUT_LENGTH, OUTPUT_LENGTH)
auto_encoder = get_model(len(input_vocab),
INPUT_LENGTH,
len(output_vocab),
OUTPUT_LENGTH,
HIDDEN_SIZE,
NUM_LAYERS)
plot_model(auto_encoder, to_file='model.png', show_shapes=SHOW_SHAPES)
|
<commit_before><commit_msg>Add script for visualizing model<commit_after>
|
import argparse
from keras.utils import plot_model
from prep import prep_data
from model import get_model
ap = argparse.ArgumentParser()
ap.add_argument('--dataset', type=str, default='./data')
ap.add_argument('--x_length', type=int, default=15)
ap.add_argument('--y_length', type=int, default=20)
ap.add_argument('--hidden_size', type=int, default=128)
ap.add_argument('--num_layers', type=int, default=2)
ap.add_argument('--show_shapes', type=bool, default=False)
args = vars(ap.parse_args())
DATASET_PATH = args['dataset']
INPUT_LENGTH = args['x_length']
OUTPUT_LENGTH = args['y_length']
HIDDEN_SIZE = args['hidden_size']
NUM_LAYERS = args['num_layers']
SHOW_SHAPES = args['show_shapes']
_, _, input_vocab, output_vocab, _, _ = prep_data(DATASET_PATH, INPUT_LENGTH, OUTPUT_LENGTH)
auto_encoder = get_model(len(input_vocab),
INPUT_LENGTH,
len(output_vocab),
OUTPUT_LENGTH,
HIDDEN_SIZE,
NUM_LAYERS)
plot_model(auto_encoder, to_file='model.png', show_shapes=SHOW_SHAPES)
|
Add script for visualizing modelimport argparse
from keras.utils import plot_model
from prep import prep_data
from model import get_model
ap = argparse.ArgumentParser()
ap.add_argument('--dataset', type=str, default='./data')
ap.add_argument('--x_length', type=int, default=15)
ap.add_argument('--y_length', type=int, default=20)
ap.add_argument('--hidden_size', type=int, default=128)
ap.add_argument('--num_layers', type=int, default=2)
ap.add_argument('--show_shapes', type=bool, default=False)
args = vars(ap.parse_args())
DATASET_PATH = args['dataset']
INPUT_LENGTH = args['x_length']
OUTPUT_LENGTH = args['y_length']
HIDDEN_SIZE = args['hidden_size']
NUM_LAYERS = args['num_layers']
SHOW_SHAPES = args['show_shapes']
_, _, input_vocab, output_vocab, _, _ = prep_data(DATASET_PATH, INPUT_LENGTH, OUTPUT_LENGTH)
auto_encoder = get_model(len(input_vocab),
INPUT_LENGTH,
len(output_vocab),
OUTPUT_LENGTH,
HIDDEN_SIZE,
NUM_LAYERS)
plot_model(auto_encoder, to_file='model.png', show_shapes=SHOW_SHAPES)
|
<commit_before><commit_msg>Add script for visualizing model<commit_after>import argparse
from keras.utils import plot_model
from prep import prep_data
from model import get_model
ap = argparse.ArgumentParser()
ap.add_argument('--dataset', type=str, default='./data')
ap.add_argument('--x_length', type=int, default=15)
ap.add_argument('--y_length', type=int, default=20)
ap.add_argument('--hidden_size', type=int, default=128)
ap.add_argument('--num_layers', type=int, default=2)
ap.add_argument('--show_shapes', type=bool, default=False)
args = vars(ap.parse_args())
DATASET_PATH = args['dataset']
INPUT_LENGTH = args['x_length']
OUTPUT_LENGTH = args['y_length']
HIDDEN_SIZE = args['hidden_size']
NUM_LAYERS = args['num_layers']
SHOW_SHAPES = args['show_shapes']
_, _, input_vocab, output_vocab, _, _ = prep_data(DATASET_PATH, INPUT_LENGTH, OUTPUT_LENGTH)
auto_encoder = get_model(len(input_vocab),
INPUT_LENGTH,
len(output_vocab),
OUTPUT_LENGTH,
HIDDEN_SIZE,
NUM_LAYERS)
plot_model(auto_encoder, to_file='model.png', show_shapes=SHOW_SHAPES)
|
|
84fa2d2a5a1ff4aa0dca33a76677283d33dcf337
|
test/mock-connector.py
|
test/mock-connector.py
|
#!/usr/bin/python3
#
# This file is part of the KNOT Project
#
# Copyright (c) 2019, CESAR. All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import pika
import logging
logging.basicConfig(
format='%(asctime)s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S')
connection = pika.BlockingConnection(
pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
cloud_exchange = 'cloud'
fog_exchange = 'fog'
EVENT_UNREGISTER = 'device.unregister'
KEY_UNREGISTERED = 'device.unregistered'
channel.exchange_declare(exchange=fog_exchange, durable=True,
exchange_type='topic')
channel.exchange_declare(exchange=cloud_exchange, durable=True,
exchange_type='topic')
result = channel.queue_declare('', exclusive=True)
queue_name = result.method.queue
channel.queue_bind(
exchange=cloud_exchange, queue=queue_name, routing_key='device.*')
def callback(ch, method, properties, body):
logging.info("%r:%r" % (method.routing_key, body))
if method.routing_key == EVENT_UNREGISTER:
message = body
channel.basic_publish(exchange=fog_exchange,
routing_key=KEY_UNREGISTERED, body=message)
logging.info(" [x] Sent %r" % (message))
channel.basic_consume(
queue=queue_name, on_message_callback=callback, auto_ack=True)
logging.info('Listening to messages')
channel.start_consuming()
|
Add mock script to test messages in amqp
|
Add mock script to test messages in amqp
This patch adds support to receive and send unregistered
messages.
|
Python
|
lgpl-2.1
|
CESARBR/knot-service-source,CESARBR/knot-service-source,CESARBR/knot-service-source,CESARBR/knot-service-source,CESARBR/knot-service-source
|
Add mock script to test messages in amqp
This patch adds support to receive and send unregistered
messages.
|
#!/usr/bin/python3
#
# This file is part of the KNOT Project
#
# Copyright (c) 2019, CESAR. All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import pika
import logging
logging.basicConfig(
format='%(asctime)s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S')
connection = pika.BlockingConnection(
pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
cloud_exchange = 'cloud'
fog_exchange = 'fog'
EVENT_UNREGISTER = 'device.unregister'
KEY_UNREGISTERED = 'device.unregistered'
channel.exchange_declare(exchange=fog_exchange, durable=True,
exchange_type='topic')
channel.exchange_declare(exchange=cloud_exchange, durable=True,
exchange_type='topic')
result = channel.queue_declare('', exclusive=True)
queue_name = result.method.queue
channel.queue_bind(
exchange=cloud_exchange, queue=queue_name, routing_key='device.*')
def callback(ch, method, properties, body):
logging.info("%r:%r" % (method.routing_key, body))
if method.routing_key == EVENT_UNREGISTER:
message = body
channel.basic_publish(exchange=fog_exchange,
routing_key=KEY_UNREGISTERED, body=message)
logging.info(" [x] Sent %r" % (message))
channel.basic_consume(
queue=queue_name, on_message_callback=callback, auto_ack=True)
logging.info('Listening to messages')
channel.start_consuming()
|
<commit_before><commit_msg>Add mock script to test messages in amqp
This patch adds support to receive and send unregistered
messages.<commit_after>
|
#!/usr/bin/python3
#
# This file is part of the KNOT Project
#
# Copyright (c) 2019, CESAR. All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import pika
import logging
logging.basicConfig(
format='%(asctime)s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S')
connection = pika.BlockingConnection(
pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
cloud_exchange = 'cloud'
fog_exchange = 'fog'
EVENT_UNREGISTER = 'device.unregister'
KEY_UNREGISTERED = 'device.unregistered'
channel.exchange_declare(exchange=fog_exchange, durable=True,
exchange_type='topic')
channel.exchange_declare(exchange=cloud_exchange, durable=True,
exchange_type='topic')
result = channel.queue_declare('', exclusive=True)
queue_name = result.method.queue
channel.queue_bind(
exchange=cloud_exchange, queue=queue_name, routing_key='device.*')
def callback(ch, method, properties, body):
logging.info("%r:%r" % (method.routing_key, body))
if method.routing_key == EVENT_UNREGISTER:
message = body
channel.basic_publish(exchange=fog_exchange,
routing_key=KEY_UNREGISTERED, body=message)
logging.info(" [x] Sent %r" % (message))
channel.basic_consume(
queue=queue_name, on_message_callback=callback, auto_ack=True)
logging.info('Listening to messages')
channel.start_consuming()
|
Add mock script to test messages in amqp
This patch adds support to receive and send unregistered
messages.#!/usr/bin/python3
#
# This file is part of the KNOT Project
#
# Copyright (c) 2019, CESAR. All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import pika
import logging
logging.basicConfig(
format='%(asctime)s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S')
connection = pika.BlockingConnection(
pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
cloud_exchange = 'cloud'
fog_exchange = 'fog'
EVENT_UNREGISTER = 'device.unregister'
KEY_UNREGISTERED = 'device.unregistered'
channel.exchange_declare(exchange=fog_exchange, durable=True,
exchange_type='topic')
channel.exchange_declare(exchange=cloud_exchange, durable=True,
exchange_type='topic')
result = channel.queue_declare('', exclusive=True)
queue_name = result.method.queue
channel.queue_bind(
exchange=cloud_exchange, queue=queue_name, routing_key='device.*')
def callback(ch, method, properties, body):
logging.info("%r:%r" % (method.routing_key, body))
if method.routing_key == EVENT_UNREGISTER:
message = body
channel.basic_publish(exchange=fog_exchange,
routing_key=KEY_UNREGISTERED, body=message)
logging.info(" [x] Sent %r" % (message))
channel.basic_consume(
queue=queue_name, on_message_callback=callback, auto_ack=True)
logging.info('Listening to messages')
channel.start_consuming()
|
<commit_before><commit_msg>Add mock script to test messages in amqp
This patch adds support to receive and send unregistered
messages.<commit_after>#!/usr/bin/python3
#
# This file is part of the KNOT Project
#
# Copyright (c) 2019, CESAR. All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import pika
import logging
logging.basicConfig(
format='%(asctime)s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S')
connection = pika.BlockingConnection(
pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
cloud_exchange = 'cloud'
fog_exchange = 'fog'
EVENT_UNREGISTER = 'device.unregister'
KEY_UNREGISTERED = 'device.unregistered'
channel.exchange_declare(exchange=fog_exchange, durable=True,
exchange_type='topic')
channel.exchange_declare(exchange=cloud_exchange, durable=True,
exchange_type='topic')
result = channel.queue_declare('', exclusive=True)
queue_name = result.method.queue
channel.queue_bind(
exchange=cloud_exchange, queue=queue_name, routing_key='device.*')
def callback(ch, method, properties, body):
logging.info("%r:%r" % (method.routing_key, body))
if method.routing_key == EVENT_UNREGISTER:
message = body
channel.basic_publish(exchange=fog_exchange,
routing_key=KEY_UNREGISTERED, body=message)
logging.info(" [x] Sent %r" % (message))
channel.basic_consume(
queue=queue_name, on_message_callback=callback, auto_ack=True)
logging.info('Listening to messages')
channel.start_consuming()
|
|
5191055f0cc071ff4dc0a8ea42e598809c519be4
|
magnum/db/sqlalchemy/alembic/versions/3b6c4c42adb4_add_unique_constraints.py
|
magnum/db/sqlalchemy/alembic/versions/3b6c4c42adb4_add_unique_constraints.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Add unique constraints
Revision ID: 3b6c4c42adb4
Revises: 592131657ca1
Create Date: 2015-05-05 09:45:44.657047
"""
# revision identifiers, used by Alembic.
revision = '3b6c4c42adb4'
down_revision = '592131657ca1'
from alembic import op
def upgrade():
op.create_unique_constraint("uniq_bay0uuid", "bay", ["uuid"])
op.create_unique_constraint("uniq_baylock0bay_uuid", "baylock",
["bay_uuid"])
op.create_unique_constraint("uniq_baymodel0uuid", "baymodel", ["uuid"])
op.create_unique_constraint("uniq_container0uuid", "container", ["uuid"])
op.create_unique_constraint("uniq_node0uuid", "node", ["uuid"])
op.create_unique_constraint("uniq_node0ironic_node_id", "node",
["ironic_node_id"])
op.create_unique_constraint("uniq_pod0uuid", "pod", ["uuid"])
op.create_unique_constraint("uniq_service0uuid", "service", ["uuid"])
op.create_unique_constraint("uniq_replicationcontroller0uuid",
"replicationcontroller", ["uuid"])
|
Add unique column constraints to db
|
Add unique column constraints to db
Unique constraints are not being set on the database but they are
defined in the schema. This db upgrade adds the unique constraints.
Change-Id: I0dd1c2190bf7b68e1e0c8c71fbb123d2c82cc0d3
Closes-Bug: 1451761
|
Python
|
apache-2.0
|
ChengTiesheng/magnum,hongbin/magnum,eshijia/magnum,Tennyson53/magnum,dimtruck/magnum,eshijia/SUR,ArchiFleKs/magnum,dimtruck/magnum,Tennyson53/magnum,ddepaoli3/magnum,LaynePeng/magnum,ChengTiesheng/magnum,ramielrowe/magnum,mjbrewer/testindex,Tennyson53/SUR,mjbrewer/testindex,ffantast/magnum,annegentle/magnum,ddepaoli3/magnum,mjbrewer/testindex,annegentle/magnum,LaynePeng/magnum,sajuptpm/magnum,Alzon/SUR,paulczar/magnum,eshijia/SUR,ArchiFleKs/magnum,paulczar/magnum,jay-lau/magnum,openstack/magnum,ffantast/magnum,hongbin/magnum,eshijia/magnum,ramielrowe/magnum,Tennyson53/SUR,Alzon/SUR,openstack/magnum,sajuptpm/magnum
|
Add unique column constraints to db
Unique constraints are not being set on the database but they are
defined in the schema. This db upgrade adds the unique constraints.
Change-Id: I0dd1c2190bf7b68e1e0c8c71fbb123d2c82cc0d3
Closes-Bug: 1451761
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Add unique constraints
Revision ID: 3b6c4c42adb4
Revises: 592131657ca1
Create Date: 2015-05-05 09:45:44.657047
"""
# revision identifiers, used by Alembic.
revision = '3b6c4c42adb4'
down_revision = '592131657ca1'
from alembic import op
def upgrade():
op.create_unique_constraint("uniq_bay0uuid", "bay", ["uuid"])
op.create_unique_constraint("uniq_baylock0bay_uuid", "baylock",
["bay_uuid"])
op.create_unique_constraint("uniq_baymodel0uuid", "baymodel", ["uuid"])
op.create_unique_constraint("uniq_container0uuid", "container", ["uuid"])
op.create_unique_constraint("uniq_node0uuid", "node", ["uuid"])
op.create_unique_constraint("uniq_node0ironic_node_id", "node",
["ironic_node_id"])
op.create_unique_constraint("uniq_pod0uuid", "pod", ["uuid"])
op.create_unique_constraint("uniq_service0uuid", "service", ["uuid"])
op.create_unique_constraint("uniq_replicationcontroller0uuid",
"replicationcontroller", ["uuid"])
|
<commit_before><commit_msg>Add unique column constraints to db
Unique constraints are not being set on the database but they are
defined in the schema. This db upgrade adds the unique constraints.
Change-Id: I0dd1c2190bf7b68e1e0c8c71fbb123d2c82cc0d3
Closes-Bug: 1451761<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Add unique constraints
Revision ID: 3b6c4c42adb4
Revises: 592131657ca1
Create Date: 2015-05-05 09:45:44.657047
"""
# revision identifiers, used by Alembic.
revision = '3b6c4c42adb4'
down_revision = '592131657ca1'
from alembic import op
def upgrade():
op.create_unique_constraint("uniq_bay0uuid", "bay", ["uuid"])
op.create_unique_constraint("uniq_baylock0bay_uuid", "baylock",
["bay_uuid"])
op.create_unique_constraint("uniq_baymodel0uuid", "baymodel", ["uuid"])
op.create_unique_constraint("uniq_container0uuid", "container", ["uuid"])
op.create_unique_constraint("uniq_node0uuid", "node", ["uuid"])
op.create_unique_constraint("uniq_node0ironic_node_id", "node",
["ironic_node_id"])
op.create_unique_constraint("uniq_pod0uuid", "pod", ["uuid"])
op.create_unique_constraint("uniq_service0uuid", "service", ["uuid"])
op.create_unique_constraint("uniq_replicationcontroller0uuid",
"replicationcontroller", ["uuid"])
|
Add unique column constraints to db
Unique constraints are not being set on the database but they are
defined in the schema. This db upgrade adds the unique constraints.
Change-Id: I0dd1c2190bf7b68e1e0c8c71fbb123d2c82cc0d3
Closes-Bug: 1451761# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Add unique constraints
Revision ID: 3b6c4c42adb4
Revises: 592131657ca1
Create Date: 2015-05-05 09:45:44.657047
"""
# revision identifiers, used by Alembic.
revision = '3b6c4c42adb4'
down_revision = '592131657ca1'
from alembic import op
def upgrade():
op.create_unique_constraint("uniq_bay0uuid", "bay", ["uuid"])
op.create_unique_constraint("uniq_baylock0bay_uuid", "baylock",
["bay_uuid"])
op.create_unique_constraint("uniq_baymodel0uuid", "baymodel", ["uuid"])
op.create_unique_constraint("uniq_container0uuid", "container", ["uuid"])
op.create_unique_constraint("uniq_node0uuid", "node", ["uuid"])
op.create_unique_constraint("uniq_node0ironic_node_id", "node",
["ironic_node_id"])
op.create_unique_constraint("uniq_pod0uuid", "pod", ["uuid"])
op.create_unique_constraint("uniq_service0uuid", "service", ["uuid"])
op.create_unique_constraint("uniq_replicationcontroller0uuid",
"replicationcontroller", ["uuid"])
|
<commit_before><commit_msg>Add unique column constraints to db
Unique constraints are not being set on the database but they are
defined in the schema. This db upgrade adds the unique constraints.
Change-Id: I0dd1c2190bf7b68e1e0c8c71fbb123d2c82cc0d3
Closes-Bug: 1451761<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Add unique constraints
Revision ID: 3b6c4c42adb4
Revises: 592131657ca1
Create Date: 2015-05-05 09:45:44.657047
"""
# revision identifiers, used by Alembic.
revision = '3b6c4c42adb4'
down_revision = '592131657ca1'
from alembic import op
def upgrade():
op.create_unique_constraint("uniq_bay0uuid", "bay", ["uuid"])
op.create_unique_constraint("uniq_baylock0bay_uuid", "baylock",
["bay_uuid"])
op.create_unique_constraint("uniq_baymodel0uuid", "baymodel", ["uuid"])
op.create_unique_constraint("uniq_container0uuid", "container", ["uuid"])
op.create_unique_constraint("uniq_node0uuid", "node", ["uuid"])
op.create_unique_constraint("uniq_node0ironic_node_id", "node",
["ironic_node_id"])
op.create_unique_constraint("uniq_pod0uuid", "pod", ["uuid"])
op.create_unique_constraint("uniq_service0uuid", "service", ["uuid"])
op.create_unique_constraint("uniq_replicationcontroller0uuid",
"replicationcontroller", ["uuid"])
|
|
d362aad24282f158aee4c8178f2f32a757658e11
|
py/delete-operation-for-two-strings.py
|
py/delete-operation-for-two-strings.py
|
class Solution(object):
def minDistance(self, word1, word2):
"""
:type word1: str
:type word2: str
:rtype: int
"""
prev = [0] * (len(word2) + 1)
for i, c1 in enumerate(word1, 1):
nxt = [0] * (len(word2) + 1)
for j, c2 in enumerate(word2, 1):
if c1 == c2:
nxt[j] = prev[j - 1] + 1
else:
nxt[j] = max(nxt[j - 1], prev[j])
prev = nxt
return len(word2) + len(word1) - 2 * prev[len(word2)]
|
Add py solution for 583. Delete Operation for Two Strings
|
Add py solution for 583. Delete Operation for Two Strings
583. Delete Operation for Two Strings: https://leetcode.com/problems/delete-operation-for-two-strings/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 583. Delete Operation for Two Strings
583. Delete Operation for Two Strings: https://leetcode.com/problems/delete-operation-for-two-strings/
|
class Solution(object):
def minDistance(self, word1, word2):
"""
:type word1: str
:type word2: str
:rtype: int
"""
prev = [0] * (len(word2) + 1)
for i, c1 in enumerate(word1, 1):
nxt = [0] * (len(word2) + 1)
for j, c2 in enumerate(word2, 1):
if c1 == c2:
nxt[j] = prev[j - 1] + 1
else:
nxt[j] = max(nxt[j - 1], prev[j])
prev = nxt
return len(word2) + len(word1) - 2 * prev[len(word2)]
|
<commit_before><commit_msg>Add py solution for 583. Delete Operation for Two Strings
583. Delete Operation for Two Strings: https://leetcode.com/problems/delete-operation-for-two-strings/<commit_after>
|
class Solution(object):
def minDistance(self, word1, word2):
"""
:type word1: str
:type word2: str
:rtype: int
"""
prev = [0] * (len(word2) + 1)
for i, c1 in enumerate(word1, 1):
nxt = [0] * (len(word2) + 1)
for j, c2 in enumerate(word2, 1):
if c1 == c2:
nxt[j] = prev[j - 1] + 1
else:
nxt[j] = max(nxt[j - 1], prev[j])
prev = nxt
return len(word2) + len(word1) - 2 * prev[len(word2)]
|
Add py solution for 583. Delete Operation for Two Strings
583. Delete Operation for Two Strings: https://leetcode.com/problems/delete-operation-for-two-strings/class Solution(object):
def minDistance(self, word1, word2):
"""
:type word1: str
:type word2: str
:rtype: int
"""
prev = [0] * (len(word2) + 1)
for i, c1 in enumerate(word1, 1):
nxt = [0] * (len(word2) + 1)
for j, c2 in enumerate(word2, 1):
if c1 == c2:
nxt[j] = prev[j - 1] + 1
else:
nxt[j] = max(nxt[j - 1], prev[j])
prev = nxt
return len(word2) + len(word1) - 2 * prev[len(word2)]
|
<commit_before><commit_msg>Add py solution for 583. Delete Operation for Two Strings
583. Delete Operation for Two Strings: https://leetcode.com/problems/delete-operation-for-two-strings/<commit_after>class Solution(object):
def minDistance(self, word1, word2):
"""
:type word1: str
:type word2: str
:rtype: int
"""
prev = [0] * (len(word2) + 1)
for i, c1 in enumerate(word1, 1):
nxt = [0] * (len(word2) + 1)
for j, c2 in enumerate(word2, 1):
if c1 == c2:
nxt[j] = prev[j - 1] + 1
else:
nxt[j] = max(nxt[j - 1], prev[j])
prev = nxt
return len(word2) + len(word1) - 2 * prev[len(word2)]
|
|
51eed338654bfa7c6ebd96cc4e1edb8759bd1bc5
|
recipes/msgf_plus/msgf_plus.py
|
recipes/msgf_plus/msgf_plus.py
|
#!/usr/bin/env python
#
# Wrapper script for Java Conda packages that ensures that the java runtime
# is invoked with the right options. Adapted from the bash script (http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128).
#
#
# Program Parameters
#
import os
import sys
import subprocess
from os import access, getenv, X_OK
jar_file = 'MSGFPlus.jar'
default_jvm_mem_opts = ['-Xms512m', '-Xmx1g']
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
"""Return the symlink-resolved, canonicalized directory-portion of path."""
return os.path.dirname(os.path.realpath(path))
def java_executable():
"""Return the executable name of the Java interpreter."""
java_home = getenv('JAVA_HOME')
java_bin = os.path.join('bin', 'java')
if java_home and access(os.path.join(java_home, java_bin), X_OK):
return os.path.join(java_home, java_bin)
else:
return 'java'
def jvm_opts(argv):
"""Construct list of Java arguments based on our argument list.
The argument list passed in argv must not include the script name.
The return value is a 3-tuple lists of strings of the form:
(memory_options, prop_options, passthrough_options)
"""
mem_opts = []
prop_opts = []
pass_args = []
for arg in argv:
if arg.startswith('-D'):
prop_opts.append(arg)
elif arg.startswith('-XX'):
prop_opts.append(arg)
elif arg.startswith('-Xm'):
mem_opts.append(arg)
else:
pass_args.append(arg)
# In the original shell script the test coded below read:
# if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
# To reproduce the behaviour of the above shell code fragment
# it is important to explictly check for equality with None
# in the second condition, so a null envar value counts as True!
if mem_opts == [] and getenv('_JAVA_OPTIONS') == None:
mem_opts = default_jvm_mem_opts
return (mem_opts, prop_opts, pass_args)
def main():
java = java_executable()
jar_dir = real_dirname(sys.argv[0])
(mem_opts, prop_opts, pass_args) = jvm_opts(sys.argv[1:])
if pass_args != [] and pass_args[0].startswith('eu'):
jar_arg = '-cp'
else:
jar_arg = '-jar'
jar_path = os.path.join(jar_dir, jar_file)
java_args = [java]+ mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args
sys.exit(subprocess.call(java_args))
if __name__ == '__main__':
main()
|
Add script to launch ms-gf
|
Add script to launch ms-gf
|
Python
|
mit
|
shenwei356/bioconda-recipes,xguse/bioconda-recipes,bioconda/recipes,HassanAmr/bioconda-recipes,saketkc/bioconda-recipes,omicsnut/bioconda-recipes,zachcp/bioconda-recipes,Luobiny/bioconda-recipes,rob-p/bioconda-recipes,ostrokach/bioconda-recipes,rvalieris/bioconda-recipes,gregvonkuster/bioconda-recipes,peterjc/bioconda-recipes,gvlproject/bioconda-recipes,ThomasWollmann/bioconda-recipes,ThomasWollmann/bioconda-recipes,chapmanb/bioconda-recipes,rvalieris/bioconda-recipes,guowei-he/bioconda-recipes,keuv-grvl/bioconda-recipes,mdehollander/bioconda-recipes,blankenberg/bioconda-recipes,npavlovikj/bioconda-recipes,BIMSBbioinfo/bioconda-recipes,daler/bioconda-recipes,pinguinkiste/bioconda-recipes,oena/bioconda-recipes,colinbrislawn/bioconda-recipes,BIMSBbioinfo/bioconda-recipes,chapmanb/bioconda-recipes,ivirshup/bioconda-recipes,dmaticzka/bioconda-recipes,mcornwell1957/bioconda-recipes,bioconda/recipes,abims-sbr/bioconda-recipes,joachimwolff/bioconda-recipes,rvalieris/bioconda-recipes,hardingnj/bioconda-recipes,hardingnj/bioconda-recipes,mcornwell1957/bioconda-recipes,BIMSBbioinfo/bioconda-recipes,oena/bioconda-recipes,lpantano/recipes,dkoppstein/recipes,CGATOxford/bioconda-recipes,bebatut/bioconda-recipes,abims-sbr/bioconda-recipes,xguse/bioconda-recipes,mcornwell1957/bioconda-recipes,guowei-he/bioconda-recipes,saketkc/bioconda-recipes,colinbrislawn/bioconda-recipes,BIMSBbioinfo/bioconda-recipes,zwanli/bioconda-recipes,guowei-he/bioconda-recipes,npavlovikj/bioconda-recipes,chapmanb/bioconda-recipes,jasper1918/bioconda-recipes,abims-sbr/bioconda-recipes,omicsnut/bioconda-recipes,joachimwolff/bioconda-recipes,xguse/bioconda-recipes,bioconda/bioconda-recipes,martin-mann/bioconda-recipes,dmaticzka/bioconda-recipes,rob-p/bioconda-recipes,zwanli/bioconda-recipes,roryk/recipes,JenCabral/bioconda-recipes,pinguinkiste/bioconda-recipes,bebatut/bioconda-recipes,ivirshup/bioconda-recipes,mcornwell1957/bioconda-recipes,ostrokach/bioconda-recipes,rob-p/bioconda-recipes,chapmanb/bioconda-recipes,matthdsm/bioconda-recipes,bow/bioconda-recipes,CGATOxford/bioconda-recipes,blankenberg/bioconda-recipes,HassanAmr/bioconda-recipes,JenCabral/bioconda-recipes,acaprez/recipes,ivirshup/bioconda-recipes,omicsnut/bioconda-recipes,bow/bioconda-recipes,shenwei356/bioconda-recipes,roryk/recipes,Luobiny/bioconda-recipes,matthdsm/bioconda-recipes,mdehollander/bioconda-recipes,joachimwolff/bioconda-recipes,mdehollander/bioconda-recipes,CGATOxford/bioconda-recipes,bow/bioconda-recipes,cokelaer/bioconda-recipes,peterjc/bioconda-recipes,instituteofpathologyheidelberg/bioconda-recipes,ivirshup/bioconda-recipes,phac-nml/bioconda-recipes,zwanli/bioconda-recipes,acaprez/recipes,cokelaer/bioconda-recipes,zachcp/bioconda-recipes,abims-sbr/bioconda-recipes,JenCabral/bioconda-recipes,oena/bioconda-recipes,dmaticzka/bioconda-recipes,cokelaer/bioconda-recipes,oena/bioconda-recipes,shenwei356/bioconda-recipes,zachcp/bioconda-recipes,mdehollander/bioconda-recipes,JenCabral/bioconda-recipes,bioconda/recipes,zwanli/bioconda-recipes,ThomasWollmann/bioconda-recipes,lpantano/recipes,cokelaer/bioconda-recipes,joachimwolff/bioconda-recipes,matthdsm/bioconda-recipes,saketkc/bioconda-recipes,ThomasWollmann/bioconda-recipes,zwanli/bioconda-recipes,CGATOxford/bioconda-recipes,martin-mann/bioconda-recipes,pinguinkiste/bioconda-recipes,dkoppstein/recipes,gvlproject/bioconda-recipes,omicsnut/bioconda-recipes,xguse/bioconda-recipes,bow/bioconda-recipes,colinbrislawn/bioconda-recipes,gvlproject/bioconda-recipes,HassanAmr/bioconda-recipes,daler/bioconda-recipes,guowei-he/bioconda-recipes,hardingnj/bioconda-recipes,matthdsm/bioconda-recipes,peterjc/bioconda-recipes,ThomasWollmann/bioconda-recipes,rvalieris/bioconda-recipes,guowei-he/bioconda-recipes,mcornwell1957/bioconda-recipes,lpantano/recipes,acaprez/recipes,bow/bioconda-recipes,keuv-grvl/bioconda-recipes,saketkc/bioconda-recipes,keuv-grvl/bioconda-recipes,colinbrislawn/bioconda-recipes,jfallmann/bioconda-recipes,bebatut/bioconda-recipes,zwanli/bioconda-recipes,Luobiny/bioconda-recipes,peterjc/bioconda-recipes,HassanAmr/bioconda-recipes,roryk/recipes,ThomasWollmann/bioconda-recipes,chapmanb/bioconda-recipes,instituteofpathologyheidelberg/bioconda-recipes,jasper1918/bioconda-recipes,joachimwolff/bioconda-recipes,phac-nml/bioconda-recipes,jfallmann/bioconda-recipes,martin-mann/bioconda-recipes,gregvonkuster/bioconda-recipes,bow/bioconda-recipes,mdehollander/bioconda-recipes,ostrokach/bioconda-recipes,pinguinkiste/bioconda-recipes,JenCabral/bioconda-recipes,saketkc/bioconda-recipes,pinguinkiste/bioconda-recipes,peterjc/bioconda-recipes,JenCabral/bioconda-recipes,BIMSBbioinfo/bioconda-recipes,pinguinkiste/bioconda-recipes,daler/bioconda-recipes,martin-mann/bioconda-recipes,instituteofpathologyheidelberg/bioconda-recipes,dmaticzka/bioconda-recipes,xguse/bioconda-recipes,colinbrislawn/bioconda-recipes,rob-p/bioconda-recipes,mdehollander/bioconda-recipes,keuv-grvl/bioconda-recipes,jasper1918/bioconda-recipes,BIMSBbioinfo/bioconda-recipes,daler/bioconda-recipes,gvlproject/bioconda-recipes,ivirshup/bioconda-recipes,instituteofpathologyheidelberg/bioconda-recipes,bioconda/bioconda-recipes,bebatut/bioconda-recipes,npavlovikj/bioconda-recipes,gvlproject/bioconda-recipes,HassanAmr/bioconda-recipes,shenwei356/bioconda-recipes,rvalieris/bioconda-recipes,CGATOxford/bioconda-recipes,colinbrislawn/bioconda-recipes,daler/bioconda-recipes,acaprez/recipes,gregvonkuster/bioconda-recipes,matthdsm/bioconda-recipes,dmaticzka/bioconda-recipes,phac-nml/bioconda-recipes,lpantano/recipes,ostrokach/bioconda-recipes,gvlproject/bioconda-recipes,gregvonkuster/bioconda-recipes,ostrokach/bioconda-recipes,bioconda/bioconda-recipes,matthdsm/bioconda-recipes,hardingnj/bioconda-recipes,jfallmann/bioconda-recipes,jasper1918/bioconda-recipes,joachimwolff/bioconda-recipes,peterjc/bioconda-recipes,ivirshup/bioconda-recipes,phac-nml/bioconda-recipes,dkoppstein/recipes,abims-sbr/bioconda-recipes,omicsnut/bioconda-recipes,keuv-grvl/bioconda-recipes,hardingnj/bioconda-recipes,dmaticzka/bioconda-recipes,keuv-grvl/bioconda-recipes,Luobiny/bioconda-recipes,HassanAmr/bioconda-recipes,daler/bioconda-recipes,phac-nml/bioconda-recipes,zachcp/bioconda-recipes,ostrokach/bioconda-recipes,bioconda/bioconda-recipes,blankenberg/bioconda-recipes,CGATOxford/bioconda-recipes,abims-sbr/bioconda-recipes,oena/bioconda-recipes,jasper1918/bioconda-recipes,npavlovikj/bioconda-recipes,martin-mann/bioconda-recipes,blankenberg/bioconda-recipes,rvalieris/bioconda-recipes,jfallmann/bioconda-recipes,saketkc/bioconda-recipes,instituteofpathologyheidelberg/bioconda-recipes
|
Add script to launch ms-gf
|
#!/usr/bin/env python
#
# Wrapper script for Java Conda packages that ensures that the java runtime
# is invoked with the right options. Adapted from the bash script (http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128).
#
#
# Program Parameters
#
import os
import sys
import subprocess
from os import access, getenv, X_OK
jar_file = 'MSGFPlus.jar'
default_jvm_mem_opts = ['-Xms512m', '-Xmx1g']
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
"""Return the symlink-resolved, canonicalized directory-portion of path."""
return os.path.dirname(os.path.realpath(path))
def java_executable():
"""Return the executable name of the Java interpreter."""
java_home = getenv('JAVA_HOME')
java_bin = os.path.join('bin', 'java')
if java_home and access(os.path.join(java_home, java_bin), X_OK):
return os.path.join(java_home, java_bin)
else:
return 'java'
def jvm_opts(argv):
"""Construct list of Java arguments based on our argument list.
The argument list passed in argv must not include the script name.
The return value is a 3-tuple lists of strings of the form:
(memory_options, prop_options, passthrough_options)
"""
mem_opts = []
prop_opts = []
pass_args = []
for arg in argv:
if arg.startswith('-D'):
prop_opts.append(arg)
elif arg.startswith('-XX'):
prop_opts.append(arg)
elif arg.startswith('-Xm'):
mem_opts.append(arg)
else:
pass_args.append(arg)
# In the original shell script the test coded below read:
# if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
# To reproduce the behaviour of the above shell code fragment
# it is important to explictly check for equality with None
# in the second condition, so a null envar value counts as True!
if mem_opts == [] and getenv('_JAVA_OPTIONS') == None:
mem_opts = default_jvm_mem_opts
return (mem_opts, prop_opts, pass_args)
def main():
java = java_executable()
jar_dir = real_dirname(sys.argv[0])
(mem_opts, prop_opts, pass_args) = jvm_opts(sys.argv[1:])
if pass_args != [] and pass_args[0].startswith('eu'):
jar_arg = '-cp'
else:
jar_arg = '-jar'
jar_path = os.path.join(jar_dir, jar_file)
java_args = [java]+ mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args
sys.exit(subprocess.call(java_args))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to launch ms-gf<commit_after>
|
#!/usr/bin/env python
#
# Wrapper script for Java Conda packages that ensures that the java runtime
# is invoked with the right options. Adapted from the bash script (http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128).
#
#
# Program Parameters
#
import os
import sys
import subprocess
from os import access, getenv, X_OK
jar_file = 'MSGFPlus.jar'
default_jvm_mem_opts = ['-Xms512m', '-Xmx1g']
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
"""Return the symlink-resolved, canonicalized directory-portion of path."""
return os.path.dirname(os.path.realpath(path))
def java_executable():
"""Return the executable name of the Java interpreter."""
java_home = getenv('JAVA_HOME')
java_bin = os.path.join('bin', 'java')
if java_home and access(os.path.join(java_home, java_bin), X_OK):
return os.path.join(java_home, java_bin)
else:
return 'java'
def jvm_opts(argv):
"""Construct list of Java arguments based on our argument list.
The argument list passed in argv must not include the script name.
The return value is a 3-tuple lists of strings of the form:
(memory_options, prop_options, passthrough_options)
"""
mem_opts = []
prop_opts = []
pass_args = []
for arg in argv:
if arg.startswith('-D'):
prop_opts.append(arg)
elif arg.startswith('-XX'):
prop_opts.append(arg)
elif arg.startswith('-Xm'):
mem_opts.append(arg)
else:
pass_args.append(arg)
# In the original shell script the test coded below read:
# if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
# To reproduce the behaviour of the above shell code fragment
# it is important to explictly check for equality with None
# in the second condition, so a null envar value counts as True!
if mem_opts == [] and getenv('_JAVA_OPTIONS') == None:
mem_opts = default_jvm_mem_opts
return (mem_opts, prop_opts, pass_args)
def main():
java = java_executable()
jar_dir = real_dirname(sys.argv[0])
(mem_opts, prop_opts, pass_args) = jvm_opts(sys.argv[1:])
if pass_args != [] and pass_args[0].startswith('eu'):
jar_arg = '-cp'
else:
jar_arg = '-jar'
jar_path = os.path.join(jar_dir, jar_file)
java_args = [java]+ mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args
sys.exit(subprocess.call(java_args))
if __name__ == '__main__':
main()
|
Add script to launch ms-gf#!/usr/bin/env python
#
# Wrapper script for Java Conda packages that ensures that the java runtime
# is invoked with the right options. Adapted from the bash script (http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128).
#
#
# Program Parameters
#
import os
import sys
import subprocess
from os import access, getenv, X_OK
jar_file = 'MSGFPlus.jar'
default_jvm_mem_opts = ['-Xms512m', '-Xmx1g']
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
"""Return the symlink-resolved, canonicalized directory-portion of path."""
return os.path.dirname(os.path.realpath(path))
def java_executable():
"""Return the executable name of the Java interpreter."""
java_home = getenv('JAVA_HOME')
java_bin = os.path.join('bin', 'java')
if java_home and access(os.path.join(java_home, java_bin), X_OK):
return os.path.join(java_home, java_bin)
else:
return 'java'
def jvm_opts(argv):
"""Construct list of Java arguments based on our argument list.
The argument list passed in argv must not include the script name.
The return value is a 3-tuple lists of strings of the form:
(memory_options, prop_options, passthrough_options)
"""
mem_opts = []
prop_opts = []
pass_args = []
for arg in argv:
if arg.startswith('-D'):
prop_opts.append(arg)
elif arg.startswith('-XX'):
prop_opts.append(arg)
elif arg.startswith('-Xm'):
mem_opts.append(arg)
else:
pass_args.append(arg)
# In the original shell script the test coded below read:
# if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
# To reproduce the behaviour of the above shell code fragment
# it is important to explictly check for equality with None
# in the second condition, so a null envar value counts as True!
if mem_opts == [] and getenv('_JAVA_OPTIONS') == None:
mem_opts = default_jvm_mem_opts
return (mem_opts, prop_opts, pass_args)
def main():
java = java_executable()
jar_dir = real_dirname(sys.argv[0])
(mem_opts, prop_opts, pass_args) = jvm_opts(sys.argv[1:])
if pass_args != [] and pass_args[0].startswith('eu'):
jar_arg = '-cp'
else:
jar_arg = '-jar'
jar_path = os.path.join(jar_dir, jar_file)
java_args = [java]+ mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args
sys.exit(subprocess.call(java_args))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to launch ms-gf<commit_after>#!/usr/bin/env python
#
# Wrapper script for Java Conda packages that ensures that the java runtime
# is invoked with the right options. Adapted from the bash script (http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128).
#
#
# Program Parameters
#
import os
import sys
import subprocess
from os import access, getenv, X_OK
jar_file = 'MSGFPlus.jar'
default_jvm_mem_opts = ['-Xms512m', '-Xmx1g']
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
"""Return the symlink-resolved, canonicalized directory-portion of path."""
return os.path.dirname(os.path.realpath(path))
def java_executable():
"""Return the executable name of the Java interpreter."""
java_home = getenv('JAVA_HOME')
java_bin = os.path.join('bin', 'java')
if java_home and access(os.path.join(java_home, java_bin), X_OK):
return os.path.join(java_home, java_bin)
else:
return 'java'
def jvm_opts(argv):
"""Construct list of Java arguments based on our argument list.
The argument list passed in argv must not include the script name.
The return value is a 3-tuple lists of strings of the form:
(memory_options, prop_options, passthrough_options)
"""
mem_opts = []
prop_opts = []
pass_args = []
for arg in argv:
if arg.startswith('-D'):
prop_opts.append(arg)
elif arg.startswith('-XX'):
prop_opts.append(arg)
elif arg.startswith('-Xm'):
mem_opts.append(arg)
else:
pass_args.append(arg)
# In the original shell script the test coded below read:
# if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
# To reproduce the behaviour of the above shell code fragment
# it is important to explictly check for equality with None
# in the second condition, so a null envar value counts as True!
if mem_opts == [] and getenv('_JAVA_OPTIONS') == None:
mem_opts = default_jvm_mem_opts
return (mem_opts, prop_opts, pass_args)
def main():
java = java_executable()
jar_dir = real_dirname(sys.argv[0])
(mem_opts, prop_opts, pass_args) = jvm_opts(sys.argv[1:])
if pass_args != [] and pass_args[0].startswith('eu'):
jar_arg = '-cp'
else:
jar_arg = '-jar'
jar_path = os.path.join(jar_dir, jar_file)
java_args = [java]+ mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args
sys.exit(subprocess.call(java_args))
if __name__ == '__main__':
main()
|
|
0b90b70d9a87995536d22d9f99f65d82d62e0f8e
|
tests/test_commands.py
|
tests/test_commands.py
|
import unittest
from unittest.mock import MagicMock
from phial.commands import help_command
class TestHelpCommand(unittest.TestCase):
def test_help_command(self):
bot = MagicMock()
command = MagicMock()
command._help = "Test description"
bot.commands = {"test_pattern": command}
bot.command_names = {"test_pattern": "test"}
expected_help_text = "*test* - Test description\n"
help_text = help_command(bot)
self.assertEqual(help_text, expected_help_text)
|
Add test for help command
|
Add test for help command
|
Python
|
mit
|
sedders123/phial
|
Add test for help command
|
import unittest
from unittest.mock import MagicMock
from phial.commands import help_command
class TestHelpCommand(unittest.TestCase):
def test_help_command(self):
bot = MagicMock()
command = MagicMock()
command._help = "Test description"
bot.commands = {"test_pattern": command}
bot.command_names = {"test_pattern": "test"}
expected_help_text = "*test* - Test description\n"
help_text = help_command(bot)
self.assertEqual(help_text, expected_help_text)
|
<commit_before><commit_msg>Add test for help command<commit_after>
|
import unittest
from unittest.mock import MagicMock
from phial.commands import help_command
class TestHelpCommand(unittest.TestCase):
def test_help_command(self):
bot = MagicMock()
command = MagicMock()
command._help = "Test description"
bot.commands = {"test_pattern": command}
bot.command_names = {"test_pattern": "test"}
expected_help_text = "*test* - Test description\n"
help_text = help_command(bot)
self.assertEqual(help_text, expected_help_text)
|
Add test for help commandimport unittest
from unittest.mock import MagicMock
from phial.commands import help_command
class TestHelpCommand(unittest.TestCase):
def test_help_command(self):
bot = MagicMock()
command = MagicMock()
command._help = "Test description"
bot.commands = {"test_pattern": command}
bot.command_names = {"test_pattern": "test"}
expected_help_text = "*test* - Test description\n"
help_text = help_command(bot)
self.assertEqual(help_text, expected_help_text)
|
<commit_before><commit_msg>Add test for help command<commit_after>import unittest
from unittest.mock import MagicMock
from phial.commands import help_command
class TestHelpCommand(unittest.TestCase):
def test_help_command(self):
bot = MagicMock()
command = MagicMock()
command._help = "Test description"
bot.commands = {"test_pattern": command}
bot.command_names = {"test_pattern": "test"}
expected_help_text = "*test* - Test description\n"
help_text = help_command(bot)
self.assertEqual(help_text, expected_help_text)
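The phial implementation of help_command is not included in this record, only the behaviour the test expects. A minimal stand-in that would satisfy the assertion, assuming bot.commands maps patterns to command objects carrying a _help string and bot.command_names maps the same patterns to display names:

def build_help_text(bot):
    # Hypothetical stand-in for phial.commands.help_command; the real code may differ.
    help_text = ""
    for pattern, command in bot.commands.items():
        help_text += "*{0}* - {1}\n".format(bot.command_names[pattern], command._help)
    return help_text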
|
|
20680dbc1cd270c5d46e7ceb4eb57ee81417e519
|
map_client.py
|
map_client.py
|
#!/usr/bin/env python
"""
Use with timeit module as follows::
python -m timeit -n 10 'import map_client; map_client.main()'
Adjust -n 10 for the number of times you want to run the main() method.
"""
import multiprocessing
import os
import sys
import socket
def run(port):
proc_num = os.getpid()
cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
cs.connect(('127.0.0.1', int(port)))
except socket.error:
raise Exception("Proc %s - connection failed" % (proc_num))
got_bytes = True
mesg = ''
while got_bytes:
data = cs.recv(1024)
if len(data) <= 0:
got_bytes = False
else:
mesg += data
cs.close()
print 'Proc %s %s' % (proc_num, mesg)
def main():
# run 500 client connections to the server at TCP port 8081
num_procs = 500
pool = multiprocessing.Pool()
print 'Pool created.'
work = [ 8081 for i in range(0, num_procs) ]
pool.map_async(run, work)
pool.close()
pool.join()
print 'Joined. Done.'
if __name__ == '__main__':
main()
|
Add pure python multi-process client
|
Add pure python multi-process client
Avoid using bash to spawn our worker processes,
since it can lose them and cleaning them up is
quite annoying.
|
Python
|
apache-2.0
|
locke105/drunken-tribble,locke105/drunken-tribble
|
Add pure python multi-process client
Avoid using bash to spawn our worker processes,
since it can lose them and cleaning them up is
quite annoying.
|
#!/usr/bin/env python
"""
Use with timeit module as follows::
python -m timeit -n 10 'import map_client; map_client.main()'
Adjust -n 10 for the number of times you want to run the main() method.
"""
import multiprocessing
import os
import sys
import socket
def run(port):
proc_num = os.getpid()
cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
cs.connect(('127.0.0.1', int(port)))
except socket.error:
raise Exception("Proc %s - connection failed" % (proc_num))
got_bytes = True
mesg = ''
while got_bytes:
data = cs.recv(1024)
if len(data) <= 0:
got_bytes = False
else:
mesg += data
cs.close()
print 'Proc %s %s' % (proc_num, mesg)
def main():
# run 500 client connections to the server at TCP port 8081
num_procs = 500
pool = multiprocessing.Pool()
print 'Pool created.'
work = [ 8081 for i in range(0, num_procs) ]
pool.map_async(run, work)
pool.close()
pool.join()
print 'Joined. Done.'
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add pure python multi-process client
Avoid using bash to spawn our worker processes,
since it can lose them and cleaning them up is
quite annoying.<commit_after>
|
#!/usr/bin/env python
"""
Use with timeit module as follows::
python -m timeit -n 10 'import map_client; map_client.main()'
Adjust -n 10 for the number of times you want to run the main() method.
"""
import multiprocessing
import os
import sys
import socket
def run(port):
proc_num = os.getpid()
cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
cs.connect(('127.0.0.1', int(port)))
except socket.error:
raise Exception("Proc %s - connection failed" % (proc_num))
got_bytes = True
mesg = ''
while got_bytes:
data = cs.recv(1024)
if len(data) <= 0:
got_bytes = False
else:
mesg += data
cs.close()
print 'Proc %s %s' % (proc_num, mesg)
def main():
# run 500 client connections to the server at TCP port 8081
num_procs = 500
pool = multiprocessing.Pool()
print 'Pool created.'
work = [ 8081 for i in range(0, num_procs) ]
pool.map_async(run, work)
pool.close()
pool.join()
print 'Joined. Done.'
if __name__ == '__main__':
main()
|
Add pure python multi-process client
Avoid using bash to spawn our worker processes,
since it can lose them and cleaning them up is
quite annoying.#!/usr/bin/env python
"""
Use with timeit module as follows::
python -m timeit -n 10 'import map_client; map_client.main()'
Adjust -n 10 for the number of times you want to run the main() method.
"""
import multiprocessing
import os
import sys
import socket
def run(port):
proc_num = os.getpid()
cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
cs.connect(('127.0.0.1', int(port)))
except socket.error:
raise Exception("Proc %s - connection failed" % (proc_num))
got_bytes = True
mesg = ''
while got_bytes:
data = cs.recv(1024)
if len(data) <= 0:
got_bytes = False
else:
mesg += data
cs.close()
print 'Proc %s %s' % (proc_num, mesg)
def main():
# run 500 client connections to the server at TCP port 8081
num_procs = 500
pool = multiprocessing.Pool()
print 'Pool created.'
work = [ 8081 for i in range(0, num_procs) ]
pool.map_async(run, work)
pool.close()
pool.join()
print 'Joined. Done.'
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add pure python multi-process client
Avoid using bash to spawn our worker processes,
since it can lose them and cleaning them up is
quite annoying.<commit_after>#!/usr/bin/env python
"""
Use with timeit module as follows::
python -m timeit -n 10 'import map_client; map_client.main()'
Adjust -n 10 for the number of times you want to run the main() method.
"""
import multiprocessing
import os
import sys
import socket
def run(port):
proc_num = os.getpid()
cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
cs.connect(('127.0.0.1', int(port)))
except socket.error:
raise Exception("Proc %s - connection failed" % (proc_num))
got_bytes = True
mesg = ''
while got_bytes:
data = cs.recv(1024)
if len(data) <= 0:
got_bytes = False
else:
mesg += data
cs.close()
print 'Proc %s %s' % (proc_num, mesg)
def main():
# run 500 client connections to the server at TCP port 8081
num_procs = 500
pool = multiprocessing.Pool()
print 'Pool created.'
work = [ 8081 for i in range(0, num_procs) ]
pool.map_async(run, work)
pool.close()
pool.join()
print 'Joined. Done.'
if __name__ == '__main__':
main()
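The docstring drives this script through the timeit command line. The same 10-run measurement can also be taken from Python directly; this assumes map_client.py is importable from the working directory and that a server is already listening on 127.0.0.1:8081:

import timeit

# Equivalent of: python -m timeit -n 10 'import map_client; map_client.main()'
elapsed = timeit.timeit('map_client.main()', setup='import map_client', number=10)
print('10 runs took %f seconds' % elapsed)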
|
|
fbe442ad7541a10fba461347c8dcecdc3cab8a26
|
examples/image_segmentation.py
|
examples/image_segmentation.py
|
"""
===========================================
Semantic Image Segmentation on Pascal VOC
===========================================
This example demonstrates learning a superpixel CRF
for semantic image segmentation.
To run the experiment, please download the pre-processed data from:
The data consists of superpixels, unary potentials, and the connectivity
structure of the superpixels.
The unary potentials were originally provided by Philipp Kraehenbuehl:
The superpixels were extracted using SLIC.
The code for generating the connectivity graph and edge features will be made
public soon.
This example does not contain the proper evaluation on pixel level, as that
would need the Pascal VOC 2010 dataset.
"""
import numpy as np
import cPickle
from pystruct import learners
import pystruct.models as crfs
from pystruct.utils import SaveLogger
data_train = cPickle.load(open("data_train.pickle"))
C = 0.01
n_states = 21
print("number of samples: %s" % len(data_train.X))
class_weights = 1. / np.bincount(np.hstack(data_train.Y))
class_weights *= 21. / np.sum(class_weights)
print(class_weights)
model = crfs.EdgeFeatureGraphCRF(n_states=n_states,
n_features=data_train.X[0][0].shape[1],
inference_method='qpbo',
class_weight=class_weights,
n_edge_features=3,
symmetric_edge_features=[0, 1],
antisymmetric_edge_features=[2])
experiment_name = "edge_features_one_slack_trainval_%f" % C
ssvm = learners.NSlackSSVM(
model, verbose=2, C=C, max_iter=100000, n_jobs=-1,
tol=0.0001, show_loss_every=5,
logger=SaveLogger(experiment_name + ".pickle", save_every=100),
inactive_threshold=1e-3, inactive_window=10, batch_size=100)
ssvm.fit(data_train.X, data_train.Y)
data_val = cPickle.load(open("data_val.pickle"))
y_pred = ssvm.predict(data_val.X)
# we throw away void superpixels and flatten everything
y_pred, y_true = np.hstack(y_pred), np.hstack(data_val.Y)
y_pred = y_pred[y_true != 255]
y_true = y_true[y_true != 255]
print("Score on validation set: %f" % np.mean(y_true == y_pred))
|
Add pascal image segmentation example
|
Add pascal image segmentation example
|
Python
|
bsd-2-clause
|
wattlebird/pystruct,massmutual/pystruct,amueller/pystruct,massmutual/pystruct,wattlebird/pystruct,d-mittal/pystruct,pystruct/pystruct,amueller/pystruct,d-mittal/pystruct,pystruct/pystruct
|
Add pascal image segmentation example
|
"""
===========================================
Semantic Image Segmentation on Pascal VOC
===========================================
This example demonstrates learning a superpixel CRF
for semantic image segmentation.
To run the experiment, please download the pre-processed data from:
The data consists of superpixels, unary potentials, and the connectivity
structure of the superpixels.
The unary potentials were originally provided by Philipp Kraehenbuehl:
The superpixels were extracted using SLIC.
The code for generating the connectivity graph and edge features will be made
public soon.
This example does not contain the proper evaluation on pixel level, as that
would need the Pascal VOC 2010 dataset.
"""
import numpy as np
import cPickle
from pystruct import learners
import pystruct.models as crfs
from pystruct.utils import SaveLogger
data_train = cPickle.load(open("data_train.pickle"))
C = 0.01
n_states = 21
print("number of samples: %s" % len(data_train.X))
class_weights = 1. / np.bincount(np.hstack(data_train.Y))
class_weights *= 21. / np.sum(class_weights)
print(class_weights)
model = crfs.EdgeFeatureGraphCRF(n_states=n_states,
n_features=data_train.X[0][0].shape[1],
inference_method='qpbo',
class_weight=class_weights,
n_edge_features=3,
symmetric_edge_features=[0, 1],
antisymmetric_edge_features=[2])
experiment_name = "edge_features_one_slack_trainval_%f" % C
ssvm = learners.NSlackSSVM(
model, verbose=2, C=C, max_iter=100000, n_jobs=-1,
tol=0.0001, show_loss_every=5,
logger=SaveLogger(experiment_name + ".pickle", save_every=100),
inactive_threshold=1e-3, inactive_window=10, batch_size=100)
ssvm.fit(data_train.X, data_train.Y)
data_val = cPickle.load(open("data_val.pickle"))
y_pred = ssvm.predict(data_val.X)
# we throw away void superpixels and flatten everything
y_pred, y_true = np.hstack(y_pred), np.hstack(data_val.Y)
y_pred = y_pred[y_true != 255]
y_true = y_true[y_true != 255]
print("Score on validation set: %f" % np.mean(y_true == y_pred))
|
<commit_before><commit_msg>Add pascal image segmentation example<commit_after>
|
"""
===========================================
Semantic Image Segmentation on Pascal VOC
===========================================
This example demonstrates learning a superpixel CRF
for semantic image segmentation.
To run the experiment, please download the pre-processed data from:
The data consists of superpixels, unary potentials, and the connectivity
structure of the superpixels.
The unary potentials were originally provided by Philipp Kraehenbuehl:
The superpixels were extracted using SLIC.
The code for generating the connectivity graph and edge features will be made
public soon.
This example does not contain the proper evaluation on pixel level, as that
would need the Pascal VOC 2010 dataset.
"""
import numpy as np
import cPickle
from pystruct import learners
import pystruct.models as crfs
from pystruct.utils import SaveLogger
data_train = cPickle.load(open("data_train.pickle"))
C = 0.01
n_states = 21
print("number of samples: %s" % len(data_train.X))
class_weights = 1. / np.bincount(np.hstack(data_train.Y))
class_weights *= 21. / np.sum(class_weights)
print(class_weights)
model = crfs.EdgeFeatureGraphCRF(n_states=n_states,
n_features=data_train.X[0][0].shape[1],
inference_method='qpbo',
class_weight=class_weights,
n_edge_features=3,
symmetric_edge_features=[0, 1],
antisymmetric_edge_features=[2])
experiment_name = "edge_features_one_slack_trainval_%f" % C
ssvm = learners.NSlackSSVM(
model, verbose=2, C=C, max_iter=100000, n_jobs=-1,
tol=0.0001, show_loss_every=5,
logger=SaveLogger(experiment_name + ".pickle", save_every=100),
inactive_threshold=1e-3, inactive_window=10, batch_size=100)
ssvm.fit(data_train.X, data_train.Y)
data_val = cPickle.load(open("data_val.pickle"))
y_pred = ssvm.predict(data_val.X)
# we throw away void superpixels and flatten everything
y_pred, y_true = np.hstack(y_pred), np.hstack(data_val.Y)
y_pred = y_pred[y_true != 255]
y_true = y_true[y_true != 255]
print("Score on validation set: %f" % np.mean(y_true == y_pred))
|
Add pascal image segmentation example"""
===========================================
Semantic Image Segmentation on Pascal VOC
===========================================
This example demonstrates learning a superpixel CRF
for semantic image segmentation.
To run the experiment, please download the pre-processed data from:
The data consists of superpixels, unary potentials, and the connectivity
structure of the superpixels.
The unary potentials were originally provided by Philipp Kraehenbuehl:
The superpixels were extracted using SLIC.
The code for generating the connectivity graph and edge features will be made
public soon.
This example does not contain the proper evaluation on pixel level, as that
would need the Pascal VOC 2010 dataset.
"""
import numpy as np
import cPickle
from pystruct import learners
import pystruct.models as crfs
from pystruct.utils import SaveLogger
data_train = cPickle.load(open("data_train.pickle"))
C = 0.01
n_states = 21
print("number of samples: %s" % len(data_train.X))
class_weights = 1. / np.bincount(np.hstack(data_train.Y))
class_weights *= 21. / np.sum(class_weights)
print(class_weights)
model = crfs.EdgeFeatureGraphCRF(n_states=n_states,
n_features=data_train.X[0][0].shape[1],
inference_method='qpbo',
class_weight=class_weights,
n_edge_features=3,
symmetric_edge_features=[0, 1],
antisymmetric_edge_features=[2])
experiment_name = "edge_features_one_slack_trainval_%f" % C
ssvm = learners.NSlackSSVM(
model, verbose=2, C=C, max_iter=100000, n_jobs=-1,
tol=0.0001, show_loss_every=5,
logger=SaveLogger(experiment_name + ".pickle", save_every=100),
inactive_threshold=1e-3, inactive_window=10, batch_size=100)
ssvm.fit(data_train.X, data_train.Y)
data_val = cPickle.load(open("data_val.pickle"))
y_pred = ssvm.predict(data_val.X)
# we throw away void superpixels and flatten everything
y_pred, y_true = np.hstack(y_pred), np.hstack(data_val.Y)
y_pred = y_pred[y_true != 255]
y_true = y_true[y_true != 255]
print("Score on validation set: %f" % np.mean(y_true == y_pred))
|
<commit_before><commit_msg>Add pascal image segmentation example<commit_after>"""
===========================================
Semantic Image Segmentation on Pascal VOC
===========================================
This example demonstrates learning a superpixel CRF
for semantic image segmentation.
To run the experiment, please download the pre-processed data from:
The data consists of superpixels, unary potentials, and the connectivity
structure of the superpixels.
The unary potentials were originally provided by Philipp Kraehenbuehl:
The superpixels were extracted using SLIC.
The code for generating the connectivity graph and edge features will be made
public soon.
This example does not contain the proper evaluation on pixel level, as that
would need the Pascal VOC 2010 dataset.
"""
import numpy as np
import cPickle
from pystruct import learners
import pystruct.models as crfs
from pystruct.utils import SaveLogger
data_train = cPickle.load(open("data_train.pickle"))
C = 0.01
n_states = 21
print("number of samples: %s" % len(data_train.X))
class_weights = 1. / np.bincount(np.hstack(data_train.Y))
class_weights *= 21. / np.sum(class_weights)
print(class_weights)
model = crfs.EdgeFeatureGraphCRF(n_states=n_states,
n_features=data_train.X[0][0].shape[1],
inference_method='qpbo',
class_weight=class_weights,
n_edge_features=3,
symmetric_edge_features=[0, 1],
antisymmetric_edge_features=[2])
experiment_name = "edge_features_one_slack_trainval_%f" % C
ssvm = learners.NSlackSSVM(
model, verbose=2, C=C, max_iter=100000, n_jobs=-1,
tol=0.0001, show_loss_every=5,
logger=SaveLogger(experiment_name + ".pickle", save_every=100),
inactive_threshold=1e-3, inactive_window=10, batch_size=100)
ssvm.fit(data_train.X, data_train.Y)
data_val = cPickle.load(open("data_val.pickle"))
y_pred = ssvm.predict(data_val.X)
# we throw away void superpixels and flatten everything
y_pred, y_true = np.hstack(y_pred), np.hstack(data_val.Y)
y_pred = y_pred[y_true != 255]
y_true = y_true[y_true != 255]
print("Score on validation set: %f" % np.mean(y_true == y_pred))
|
|
5016d3d088852b0396044f53493eaa14cee00d23
|
tm_set.py
|
tm_set.py
|
'''
Created on Mar 1, 2015
@author: namezys
Set time machine backup disable flag
'''
import os
from argparse import ArgumentParser
from tm_police.ctrl import enable_tm
from tm_police.ctrl import disable_tm
DESCR = """Set or reset time machine disable flag"""
def main():
parser = ArgumentParser(description=DESCR)
parser.add_argument("-e", "--enable", action="store_true",
help="Enable TM backup. Otherwise disable")
parser.add_argument("paths", nargs="+", help="path for check")
args = parser.parse_args()
paths = args.paths
for path in paths:
if not os.path.exists(path):
print path, "don't exists"
return
for path in paths:
if args.enable:
enable_tm(path)
else:
disable_tm(path)
if __name__ == '__main__':
main()
|
Add script for disable/enable tm backup
|
Add script for disable/enable tm backup
|
Python
|
bsd-2-clause
|
namezys/tm_police
|
Add script for disable/enable tm backup
|
'''
Created on Mar 1, 2015
@author: namezys
Set time machine backup disable flag
'''
import os
from argparse import ArgumentParser
from tm_police.ctrl import enable_tm
from tm_police.ctrl import disable_tm
DESCR = """Set or reset time machine disable flag"""
def main():
parser = ArgumentParser(description=DESCR)
parser.add_argument("-e", "--enable", action="store_true",
help="Enable TM backup. Otherwise disable")
parser.add_argument("paths", nargs="+", help="path for check")
args = parser.parse_args()
paths = args.paths
for path in paths:
if not os.path.exists(path):
print path, "don't exists"
return
for path in paths:
if args.enable:
enable_tm(path)
else:
disable_tm(path)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for disable/enable tm backup<commit_after>
|
'''
Created on Mar 1, 2015
@author: namezys
Set time machine backup disable flag
'''
import os
from argparse import ArgumentParser
from tm_police.ctrl import enable_tm
from tm_police.ctrl import disable_tm
DESCR = """Set or reset time machine disable flag"""
def main():
parser = ArgumentParser(description=DESCR)
parser.add_argument("-e", "--enable", action="store_true",
help="Enable TM backup. Otherwise disable")
parser.add_argument("paths", nargs="+", help="path for check")
args = parser.parse_args()
paths = args.paths
for path in paths:
if not os.path.exists(path):
print path, "don't exists"
return
for path in paths:
if args.enable:
enable_tm(path)
else:
disable_tm(path)
if __name__ == '__main__':
main()
|
Add script for disable/enable tm backup'''
Created on Mar 1, 2015
@author: namezys
Set time machine backup disable flag
'''
import os
from argparse import ArgumentParser
from tm_police.ctrl import enable_tm
from tm_police.ctrl import disable_tm
DESCR = """Set or reset time machine disable flag"""
def main():
parser = ArgumentParser(description=DESCR)
parser.add_argument("-e", "--enable", action="store_true",
help="Enable TM backup. Otherwise disable")
parser.add_argument("paths", nargs="+", help="path for check")
args = parser.parse_args()
paths = args.paths
for path in paths:
if not os.path.exists(path):
print path, "don't exists"
return
for path in paths:
if args.enable:
enable_tm(path)
else:
disable_tm(path)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for disable/enable tm backup<commit_after>'''
Created on Mar 1, 2015
@author: namezys
Set time machine backup disable flag
'''
import os
from argparse import ArgumentParser
from tm_police.ctrl import enable_tm
from tm_police.ctrl import disable_tm
DESCR = """Set or reset time machine disable flag"""
def main():
parser = ArgumentParser(description=DESCR)
parser.add_argument("-e", "--enable", action="store_true",
help="Enable TM backup. Otherwise disable")
parser.add_argument("paths", nargs="+", help="path for check")
args = parser.parse_args()
paths = args.paths
for path in paths:
if not os.path.exists(path):
print path, "don't exists"
return
for path in paths:
if args.enable:
enable_tm(path)
else:
disable_tm(path)
if __name__ == '__main__':
main()
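The tm_police.ctrl module is not part of this record, so enable_tm and disable_tm are opaque here. On macOS, one plausible way to implement such a per-path flag is the tmutil command line tool; the sketch below only illustrates that idea and is not the package's actual code:

import subprocess

def disable_tm_for_path(path):
    # Hypothetical stand-in for disable_tm: ask Time Machine to skip this path.
    subprocess.check_call(['tmutil', 'addexclusion', path])

def enable_tm_for_path(path):
    # Hypothetical stand-in for enable_tm: drop the exclusion again.
    subprocess.check_call(['tmutil', 'removeexclusion', path])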
|
|
716091f26af29f308c830c172398ff1c6815accf
|
scripts/status_consistency.py
|
scripts/status_consistency.py
|
from app import geo
from app.firebase import db
from app.constants import locationsTable, venuesTable, GPS_LOCATIONS
# The casing of tripadvisor is not consistent across our Firebase scripts
TA_PROVIDER = "tripadvisor"
TA_DETAILS = "tripAdvisor"
dryRun = True
"""
Script to fix error states in the status table (e.g., places that have details
but whose status was not updated after adding new sources, due to an accidental reset).
"""
def fixStatus(center, radius_km, provider, detailsProvider, version):
location_table = db().child(locationsTable).get().val()
placeIDs = geo.get_place_ids_in_radius(center, radius_km, location_table)
print("number found {}".format(len(placeIDs)))
statusTable = db().child(venuesTable, "status").get().val()
placeDetails = db().child(venuesTable, "details").get().val()
count = 0
placeList = []
for placeID in placeIDs:
providers = placeDetails[placeID]["providers"]
if detailsProvider in providers and statusTable[placeID][provider] == -1:
st = db().child(venuesTable, "status", placeID)
if not dryRun:
st.update({provider: version})
count += 1
placeList.append(placeID)
placeList.sort()
print("Places updated: {}".format(placeList))
print("total {} places with details: {}".format(detailsProvider, count))
if __name__ == '__main__':
fixStatus(GPS_LOCATIONS["CHICAGO_CENTER"], 30, TA_PROVIDER, TA_DETAILS, 3)
|
Add script for making status consistent.
|
Add script for making status consistent.
|
Python
|
mpl-2.0
|
liuche/prox-server
|
Add script for making status consistent.
|
from app import geo
from app.firebase import db
from app.constants import locationsTable, venuesTable, GPS_LOCATIONS
# The casing of tripadvisor is not consistent across our Firebase scripts
TA_PROVIDER = "tripadvisor"
TA_DETAILS = "tripAdvisor"
dryRun = True
"""
Script to fix error states in the status table (e.g., places that have details
but whose status was not updated after adding new sources, due to an accidental reset).
"""
def fixStatus(center, radius_km, provider, detailsProvider, version):
location_table = db().child(locationsTable).get().val()
placeIDs = geo.get_place_ids_in_radius(center, radius_km, location_table)
print("number found {}".format(len(placeIDs)))
statusTable = db().child(venuesTable, "status").get().val()
placeDetails = db().child(venuesTable, "details").get().val()
count = 0
placeList = []
for placeID in placeIDs:
providers = placeDetails[placeID]["providers"]
if detailsProvider in providers and statusTable[placeID][provider] == -1:
st = db().child(venuesTable, "status", placeID)
if not dryRun:
st.update({provider: version})
count += 1
placeList.append(placeID)
placeList.sort()
print("Places updated: {}".format(placeList))
print("total {} places with details: {}".format(detailsProvider, count))
if __name__ == '__main__':
fixStatus(GPS_LOCATIONS["CHICAGO_CENTER"], 30, TA_PROVIDER, TA_DETAILS, 3)
|
<commit_before><commit_msg>Add script for making status consistent.<commit_after>
|
from app import geo
from app.firebase import db
from app.constants import locationsTable, venuesTable, GPS_LOCATIONS
# The casing of tripadvisor is not consistent across our Firebase scripts
TA_PROVIDER = "tripadvisor"
TA_DETAILS = "tripAdvisor"
dryRun = True
"""
Script to fix error states in the status table (e.g., places that have details
but whose status was not updated after adding new sources, due to an accidental reset).
"""
def fixStatus(center, radius_km, provider, detailsProvider, version):
location_table = db().child(locationsTable).get().val()
placeIDs = geo.get_place_ids_in_radius(center, radius_km, location_table)
print("number found {}".format(len(placeIDs)))
statusTable = db().child(venuesTable, "status").get().val()
placeDetails = db().child(venuesTable, "details").get().val()
count = 0
placeList = []
for placeID in placeIDs:
providers = placeDetails[placeID]["providers"]
if detailsProvider in providers and statusTable[placeID][provider] == -1:
st = db().child(venuesTable, "status", placeID)
if not dryRun:
st.update({provider: version})
count += 1
placeList.append(placeID)
placeList.sort()
print("Places updated: {}".format(placeList))
print("total {} places with details: {}".format(detailsProvider, count))
if __name__ == '__main__':
fixStatus(GPS_LOCATIONS["CHICAGO_CENTER"], 30, TA_PROVIDER, TA_DETAILS, 3)
|
Add script for making status consistent.from app import geo
from app.firebase import db
from app.constants import locationsTable, venuesTable, GPS_LOCATIONS
# The casing of tripadvisor is not consistent across our Firebase scripts
TA_PROVIDER = "tripadvisor"
TA_DETAILS = "tripAdvisor"
dryRun = True
"""
Script to fix error states in the status table (e.g., places that have details
but whose status was not updated after adding new sources, due to an accidental reset).
"""
def fixStatus(center, radius_km, provider, detailsProvider, version):
location_table = db().child(locationsTable).get().val()
placeIDs = geo.get_place_ids_in_radius(center, radius_km, location_table)
print("number found {}".format(len(placeIDs)))
statusTable = db().child(venuesTable, "status").get().val()
placeDetails = db().child(venuesTable, "details").get().val()
count = 0
placeList = []
for placeID in placeIDs:
providers = placeDetails[placeID]["providers"]
if detailsProvider in providers and statusTable[placeID][provider] == -1:
st = db().child(venuesTable, "status", placeID)
if not dryRun:
st.update({provider: version})
count += 1
placeList.append(placeID)
placeList.sort()
print("Places updated: {}".format(placeList))
print("total {} places with details: {}".format(detailsProvider, count))
if __name__ == '__main__':
fixStatus(GPS_LOCATIONS["CHICAGO_CENTER"], 30, TA_PROVIDER, TA_DETAILS, 3)
|
<commit_before><commit_msg>Add script for making status consistent.<commit_after>from app import geo
from app.firebase import db
from app.constants import locationsTable, venuesTable, GPS_LOCATIONS
# The casing of tripadvisor is not consistent across our Firebase scripts
TA_PROVIDER = "tripadvisor"
TA_DETAILS = "tripAdvisor"
dryRun = True
"""
Script to fix error states in the status table (e.g., places that have details
but whose status was not updated after adding new sources, due to an accidental reset).
"""
def fixStatus(center, radius_km, provider, detailsProvider, version):
location_table = db().child(locationsTable).get().val()
placeIDs = geo.get_place_ids_in_radius(center, radius_km, location_table)
print("number found {}".format(len(placeIDs)))
statusTable = db().child(venuesTable, "status").get().val()
placeDetails = db().child(venuesTable, "details").get().val()
count = 0
placeList = []
for placeID in placeIDs:
providers = placeDetails[placeID]["providers"]
if detailsProvider in providers and statusTable[placeID][provider] == -1:
st = db().child(venuesTable, "status", placeID)
if not dryRun:
st.update({provider: version})
count += 1
placeList.append(placeID)
placeList.sort()
print("Places updated: {}".format(placeList))
print("total {} places with details: {}".format(detailsProvider, count))
if __name__ == '__main__':
fixStatus(GPS_LOCATIONS["CHICAGO_CENTER"], 30, TA_PROVIDER, TA_DETAILS, 3)
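Stripped of the Firebase client, the selection rule in fixStatus() is "details exist for this provider but the status entry still holds the error value -1". The same rule on plain dictionaries, with made-up place IDs and no database access:

status = {"p1": {"tripadvisor": -1}, "p2": {"tripadvisor": 2}}
details = {"p1": {"providers": {"tripAdvisor": {}}},
           "p2": {"providers": {"tripAdvisor": {}}}}

needs_fix = [pid for pid in status
             if "tripAdvisor" in details[pid]["providers"]
             and status[pid]["tripadvisor"] == -1]
print(needs_fix)  # ['p1']: it has details but its status still records an error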
|
|
00b1b343fe46140b6562a3b32fa3f5c5131ea8cf
|
client/examples/intelligentmining.py
|
client/examples/intelligentmining.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from botchallenge import Robot
from botchallenge import BlockType
from botchallenge import Dir
import random
USERNAME = "rafi0t" # Put your minecraft username here
SERVER = "localhost" # Put the address of the minecraft server here
robot = Robot(USERNAME, SERVER)
elements = [BlockType.DIAMOND, BlockType.GOLD_ORE, BlockType.IRON_ORE,
BlockType.COAL_ORE]
def force_move(direction):
if robot.is_block_solid(direction):
robot.mine(direction)
robot.move(direction)
def lookup(elements):
for e in elements:
locations = robot.find_type_nearby(e)
if len(locations) > 0:
return locations
return None
def rand_dig(distance=10):
directions = [Dir.DOWN, Dir.UP, Dir.NORTH, Dir.SOUTH, Dir.EAST, Dir.WEST]
r_dig = random.choice(directions)
while robot.move(r_dig):
distance -= 1
for i in range(distance):
robot.mine(r_dig)
def too_far(origin, max_distance=50):
    # Measure how far the robot's current position has drifted from the origin.
    current = robot.get_location()
    return (current.x_coord >= origin.x_coord + max_distance or
            current.x_coord <= origin.x_coord - max_distance or
            current.y_coord >= origin.y_coord + max_distance or
            current.y_coord <= origin.y_coord - max_distance or
            current.z_coord >= origin.z_coord + max_distance or
            current.z_coord <= origin.z_coord - max_distance)
def goto(location):
while True:
direction = robot.get_location().direction(location)
if direction is None:
break
force_move(direction)
def get_blocks(locations):
for loc in locations:
goto(loc)
if __name__ == "__main__":
origin = robot.get_location()
while not too_far(origin, 10):
locations = lookup(elements)
if locations is None:
rand_dig(5)
continue
get_blocks(locations)
robot.get_inventory()
goto(origin)
|
Add preliminary version of the intelligent miner robot
|
Add preliminary version of the intelligent miner robot
|
Python
|
mit
|
Rafiot/botchallenge,Rafiot/botchallenge,Rafiot/botchallenge,Rafiot/botchallenge
|
Add preliminary version of the intelligent miner robot
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from botchallenge import Robot
from botchallenge import BlockType
from botchallenge import Dir
import random
USERNAME = "rafi0t" # Put your minecraft username here
SERVER = "localhost" # Put the address of the minecraft server here
robot = Robot(USERNAME, SERVER)
elements = [BlockType.DIAMOND, BlockType.GOLD_ORE, BlockType.IRON_ORE,
BlockType.COAL_ORE]
def force_move(direction):
if robot.is_block_solid(direction):
robot.mine(direction)
robot.move(direction)
def lookup(elements):
for e in elements:
locations = robot.find_type_nearby(e)
if len(locations) > 0:
return locations
return None
def rand_dig(distance=10):
directions = [Dir.DOWN, Dir.UP, Dir.NORTH, Dir.SOUTH, Dir.EAST, Dir.WEST]
r_dig = random.choice(directions)
while robot.move(r_dig):
distance -= 1
for i in range(distance):
robot.mine(r_dig)
def too_far(origin, max_distance=50):
    # Measure how far the robot's current position has drifted from the origin.
    current = robot.get_location()
    return (current.x_coord >= origin.x_coord + max_distance or
            current.x_coord <= origin.x_coord - max_distance or
            current.y_coord >= origin.y_coord + max_distance or
            current.y_coord <= origin.y_coord - max_distance or
            current.z_coord >= origin.z_coord + max_distance or
            current.z_coord <= origin.z_coord - max_distance)
def goto(location):
while True:
direction = robot.get_location().direction(location)
if direction is None:
break
force_move(direction)
def get_blocks(locations):
for loc in locations:
goto(loc)
if __name__ == "__main__":
origin = robot.get_location()
while not too_far(origin, 10):
locations = lookup(elements)
if locations is None:
rand_dig(5)
continue
get_blocks(locations)
robot.get_inventory()
goto(origin)
|
<commit_before><commit_msg>Add preliminary version of the intelligent miner robot<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from botchallenge import Robot
from botchallenge import BlockType
from botchallenge import Dir
import random
USERNAME = "rafi0t" # Put your minecraft username here
SERVER = "localhost" # Put the address of the minecraft server here
robot = Robot(USERNAME, SERVER)
elements = [BlockType.DIAMOND, BlockType.GOLD_ORE, BlockType.IRON_ORE,
BlockType.COAL_ORE]
def force_move(direction):
if robot.is_block_solid(direction):
robot.mine(direction)
robot.move(direction)
def lookup(elements):
for e in elements:
locations = robot.find_type_nearby(e)
if len(locations) > 0:
return locations
return None
def rand_dig(distance=10):
directions = [Dir.DOWN, Dir.UP, Dir.NORTH, Dir.SOUTH, Dir.EAST, Dir.WEST]
r_dig = random.choice(directions)
while robot.move(r_dig):
distance -= 1
for i in range(distance):
robot.mine(r_dig)
def too_far(origin, max_distance=50):
    # Measure how far the robot's current position has drifted from the origin.
    current = robot.get_location()
    return (current.x_coord >= origin.x_coord + max_distance or
            current.x_coord <= origin.x_coord - max_distance or
            current.y_coord >= origin.y_coord + max_distance or
            current.y_coord <= origin.y_coord - max_distance or
            current.z_coord >= origin.z_coord + max_distance or
            current.z_coord <= origin.z_coord - max_distance)
def goto(location):
while True:
direction = robot.get_location().direction(location)
if direction is None:
break
force_move(direction)
def get_blocks(locations):
for loc in locations:
goto(loc)
if __name__ == "__main__":
origin = robot.get_location()
while not too_far(origin, 10):
locations = lookup(elements)
if locations is None:
rand_dig(5)
continue
get_blocks(locations)
robot.get_inventory()
goto(origin)
|
Add preliminary version of the intelligent miner robot#!/usr/bin/env python
# -*- coding: utf-8 -*-
from botchallenge import Robot
from botchallenge import BlockType
from botchallenge import Dir
import random
USERNAME = "rafi0t" # Put your minecraft username here
SERVER = "localhost" # Put the address of the minecraft server here
robot = Robot(USERNAME, SERVER)
elements = [BlockType.DIAMOND, BlockType.GOLD_ORE, BlockType.IRON_ORE,
BlockType.COAL_ORE]
def force_move(direction):
if robot.is_block_solid(direction):
robot.mine(direction)
robot.move(direction)
def lookup(elements):
for e in elements:
locations = robot.find_type_nearby(e)
if len(locations) > 0:
return locations
return None
def rand_dig(distance=10):
directions = [Dir.DOWN, Dir.UP, Dir.NORTH, Dir.SOUTH, Dir.EAST, Dir.WEST]
r_dig = random.choice(directions)
while robot.move(r_dig):
distance -= 1
for i in range(distance):
robot.mine(r_dig)
def too_far(origin, max_distance=50):
    # Measure how far the robot's current position has drifted from the origin.
    current = robot.get_location()
    return (current.x_coord >= origin.x_coord + max_distance or
            current.x_coord <= origin.x_coord - max_distance or
            current.y_coord >= origin.y_coord + max_distance or
            current.y_coord <= origin.y_coord - max_distance or
            current.z_coord >= origin.z_coord + max_distance or
            current.z_coord <= origin.z_coord - max_distance)
def goto(location):
while True:
direction = robot.get_location().direction(location)
if direction is None:
break
force_move(direction)
def get_blocks(locations):
for loc in locations:
goto(loc)
if __name__ == "__main__":
origin = robot.get_location()
while not too_far(origin, 10):
locations = lookup(elements)
if locations is None:
rand_dig(5)
continue
get_blocks(locations)
robot.get_inventory()
goto(origin)
|
<commit_before><commit_msg>Add preliminary version of the intelligent miner robot<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from botchallenge import Robot
from botchallenge import BlockType
from botchallenge import Dir
import random
USERNAME = "rafi0t" # Put your minecraft username here
SERVER = "localhost" # Put the address of the minecraft server here
robot = Robot(USERNAME, SERVER)
elements = [BlockType.DIAMOND, BlockType.GOLD_ORE, BlockType.IRON_ORE,
BlockType.COAL_ORE]
def force_move(direction):
if robot.is_block_solid(direction):
robot.mine(direction)
robot.move(direction)
def lookup(elements):
for e in elements:
locations = robot.find_type_nearby(e)
if len(locations) > 0:
return locations
return None
def rand_dig(distance=10):
directions = [Dir.DOWN, Dir.UP, Dir.NORTH, Dir.SOUTH, Dir.EAST, Dir.WEST]
r_dig = random.choice(directions)
while robot.move(r_dig):
distance -= 1
for i in range(distance):
robot.mine(r_dig)
def too_far(origin, max_distance=50):
    # Measure how far the robot's current position has drifted from the origin.
    current = robot.get_location()
    return (current.x_coord >= origin.x_coord + max_distance or
            current.x_coord <= origin.x_coord - max_distance or
            current.y_coord >= origin.y_coord + max_distance or
            current.y_coord <= origin.y_coord - max_distance or
            current.z_coord >= origin.z_coord + max_distance or
            current.z_coord <= origin.z_coord - max_distance)
def goto(location):
while True:
direction = robot.get_location().direction(location)
if direction is None:
break
force_move(direction)
def get_blocks(locations):
for loc in locations:
goto(loc)
if __name__ == "__main__":
origin = robot.get_location()
while not too_far(origin, 10):
locations = lookup(elements)
if locations is None:
rand_dig(5)
continue
get_blocks(locations)
robot.get_inventory()
goto(origin)
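The wander limit in too_far() is a per-axis box check between the robot's current position and its starting point. The same test on plain coordinate tuples, outside the botchallenge API (outside_box is a hypothetical helper name):

def outside_box(origin, current, max_distance):
    # True once any axis has drifted max_distance or more from the start.
    return any(abs(c - o) >= max_distance for o, c in zip(origin, current))

print(outside_box((0, 64, 0), (3, 64, -2), 10))   # False: still close to the origin
print(outside_box((0, 64, 0), (12, 64, -2), 10))  # True: wandered too far on the x axis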
|
|
d1f354e389f97b3a13191065b3d4f585798efaa3
|
tests/cupy_tests/core_tests/test_ndarray_owndata.py
|
tests/cupy_tests/core_tests/test_ndarray_owndata.py
|
import unittest
from cupy import core
from cupy import testing
@testing.gpu
class TestArrayOwndata(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.a = core.ndarray(())
def test_original_array(self):
self.assertTrue(self.a.flags.owndata)
def test_view_array(self):
v = self.a.view()
self.assertFalse(v.flags.owndata)
def test_reshaped_array(self):
r = self.a.reshape(())
self.assertFalse(r.flags.owndata)
|
Add test for OWNDATA flag
|
Add test for OWNDATA flag
|
Python
|
mit
|
kiyukuta/chainer,ktnyt/chainer,keisuke-umezawa/chainer,hvy/chainer,wkentaro/chainer,pfnet/chainer,jnishi/chainer,cupy/cupy,jnishi/chainer,ysekky/chainer,wkentaro/chainer,hvy/chainer,chainer/chainer,wkentaro/chainer,tkerola/chainer,aonotas/chainer,ktnyt/chainer,ktnyt/chainer,jnishi/chainer,niboshi/chainer,okuta/chainer,wkentaro/chainer,ktnyt/chainer,keisuke-umezawa/chainer,cupy/cupy,chainer/chainer,chainer/chainer,jnishi/chainer,rezoo/chainer,chainer/chainer,okuta/chainer,niboshi/chainer,hvy/chainer,hvy/chainer,okuta/chainer,cupy/cupy,keisuke-umezawa/chainer,niboshi/chainer,delta2323/chainer,cupy/cupy,keisuke-umezawa/chainer,anaruse/chainer,niboshi/chainer,okuta/chainer,kashif/chainer,ronekko/chainer
|
Add test for OWNDATA flag
|
import unittest
from cupy import core
from cupy import testing
@testing.gpu
class TestArrayOwndata(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.a = core.ndarray(())
def test_original_array(self):
self.assertTrue(self.a.flags.owndata)
def test_view_array(self):
v = self.a.view()
self.assertFalse(v.flags.owndata)
def test_reshaped_array(self):
r = self.a.reshape(())
self.assertFalse(r.flags.owndata)
|
<commit_before><commit_msg>Add test for OWNDATA flag<commit_after>
|
import unittest
from cupy import core
from cupy import testing
@testing.gpu
class TestArrayOwndata(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.a = core.ndarray(())
def test_original_array(self):
self.assertTrue(self.a.flags.owndata)
def test_view_array(self):
v = self.a.view()
self.assertFalse(v.flags.owndata)
def test_reshaped_array(self):
r = self.a.reshape(())
self.assertFalse(r.flags.owndata)
|
Add test for OWNDATA flagimport unittest
from cupy import core
from cupy import testing
@testing.gpu
class TestArrayOwndata(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.a = core.ndarray(())
def test_original_array(self):
self.assertTrue(self.a.flags.owndata)
def test_view_array(self):
v = self.a.view()
self.assertFalse(v.flags.owndata)
def test_reshaped_array(self):
r = self.a.reshape(())
self.assertFalse(r.flags.owndata)
|
<commit_before><commit_msg>Add test for OWNDATA flag<commit_after>import unittest
from cupy import core
from cupy import testing
@testing.gpu
class TestArrayOwndata(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.a = core.ndarray(())
def test_original_array(self):
self.assertTrue(self.a.flags.owndata)
def test_view_array(self):
v = self.a.view()
self.assertFalse(v.flags.owndata)
def test_reshaped_array(self):
r = self.a.reshape(())
self.assertFalse(r.flags.owndata)
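The OWNDATA semantics being asserted here mirror NumPy's: a freshly allocated array owns its buffer, while views and no-copy reshapes only reference it. The same three checks run against NumPy (no GPU required) and are expected to print True, False, False:

import numpy as np

a = np.empty(())
print(a.flags.owndata)              # freshly allocated buffer
print(a.view().flags.owndata)       # a view shares a's memory
print(a.reshape(()).flags.owndata)  # this reshape is also just a view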
|
|
06e7dbb230790127d965b3090994211adef7ad5e
|
tests/unit/python/foglamp/tasks/purge/test_purge.py
|
tests/unit/python/foglamp/tasks/purge/test_purge.py
|
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
import asyncio
from unittest.mock import patch, call
import pytest
from foglamp.common.storage_client.storage_client import StorageClient
from foglamp.common.statistics import Statistics
from foglamp.tasks.purge.purge import Purge
from foglamp.common.process import FoglampProcess
__author__ = "Vaibhav Singhal"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
@pytest.allure.feature("unit")
@pytest.allure.story("tasks", "purge")
class TestPurge:
def test_write_statistics(self):
with patch.object(FoglampProcess, '__init__'):
with patch.object(Statistics, '__init__', return_value=None):
with patch.object(Statistics, 'update', return_value=asyncio.ensure_future(asyncio.sleep(0.1)))\
as mock_update:
p = Purge()
p.write_statistics(1, 2)
mock_update.assert_has_calls([call('PURGED', 1), call('UNSNPURGED', 2)])
def test_insert_into_log(self):
pass
def test_set_configuration(self):
pass
def test_purge_data(self):
pass
def test_run(self):
pass
|
Test added for unit write_statistics
|
Test added for unit write_statistics
|
Python
|
apache-2.0
|
foglamp/FogLAMP,foglamp/FogLAMP,foglamp/FogLAMP,foglamp/FogLAMP
|
Test added for unit write_statistics
|
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
import asyncio
from unittest.mock import patch, call
import pytest
from foglamp.common.storage_client.storage_client import StorageClient
from foglamp.common.statistics import Statistics
from foglamp.tasks.purge.purge import Purge
from foglamp.common.process import FoglampProcess
__author__ = "Vaibhav Singhal"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
@pytest.allure.feature("unit")
@pytest.allure.story("tasks", "purge")
class TestPurge:
def test_write_statistics(self):
with patch.object(FoglampProcess, '__init__'):
with patch.object(Statistics, '__init__', return_value=None):
with patch.object(Statistics, 'update', return_value=asyncio.ensure_future(asyncio.sleep(0.1)))\
as mock_update:
p = Purge()
p.write_statistics(1, 2)
mock_update.assert_has_calls([call('PURGED', 1), call('UNSNPURGED', 2)])
def test_insert_into_log(self):
pass
def test_set_configuration(self):
pass
def test_purge_data(self):
pass
def test_run(self):
pass
|
<commit_before><commit_msg>Test added for unit write_statistics<commit_after>
|
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
import asyncio
from unittest.mock import patch, call
import pytest
from foglamp.common.storage_client.storage_client import StorageClient
from foglamp.common.statistics import Statistics
from foglamp.tasks.purge.purge import Purge
from foglamp.common.process import FoglampProcess
__author__ = "Vaibhav Singhal"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
@pytest.allure.feature("unit")
@pytest.allure.story("tasks", "purge")
class TestPurge:
def test_write_statistics(self):
with patch.object(FoglampProcess, '__init__'):
with patch.object(Statistics, '__init__', return_value=None):
with patch.object(Statistics, 'update', return_value=asyncio.ensure_future(asyncio.sleep(0.1)))\
as mock_update:
p = Purge()
p.write_statistics(1, 2)
mock_update.assert_has_calls([call('PURGED', 1), call('UNSNPURGED', 2)])
def test_insert_into_log(self):
pass
def test_set_configuration(self):
pass
def test_purge_data(self):
pass
def test_run(self):
pass
|
Test added for unit write_statistics# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
import asyncio
from unittest.mock import patch, call
import pytest
from foglamp.common.storage_client.storage_client import StorageClient
from foglamp.common.statistics import Statistics
from foglamp.tasks.purge.purge import Purge
from foglamp.common.process import FoglampProcess
__author__ = "Vaibhav Singhal"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
@pytest.allure.feature("unit")
@pytest.allure.story("tasks", "purge")
class TestPurge:
def test_write_statistics(self):
with patch.object(FoglampProcess, '__init__'):
with patch.object(Statistics, '__init__', return_value=None):
with patch.object(Statistics, 'update', return_value=asyncio.ensure_future(asyncio.sleep(0.1)))\
as mock_update:
p = Purge()
p.write_statistics(1, 2)
mock_update.assert_has_calls([call('PURGED', 1), call('UNSNPURGED', 2)])
def test_insert_into_log(self):
pass
def test_set_configuration(self):
pass
def test_purge_data(self):
pass
def test_run(self):
pass
|
<commit_before><commit_msg>Test added for unit write_statistics<commit_after># -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
import asyncio
from unittest.mock import patch, call
import pytest
from foglamp.common.storage_client.storage_client import StorageClient
from foglamp.common.statistics import Statistics
from foglamp.tasks.purge.purge import Purge
from foglamp.common.process import FoglampProcess
__author__ = "Vaibhav Singhal"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
@pytest.allure.feature("unit")
@pytest.allure.story("tasks", "purge")
class TestPurge:
def test_write_statistics(self):
with patch.object(FoglampProcess, '__init__'):
with patch.object(Statistics, '__init__', return_value=None):
with patch.object(Statistics, 'update', return_value=asyncio.ensure_future(asyncio.sleep(0.1)))\
as mock_update:
p = Purge()
p.write_statistics(1, 2)
mock_update.assert_has_calls([call('PURGED', 1), call('UNSNPURGED', 2)])
def test_insert_into_log(self):
pass
def test_set_configuration(self):
pass
def test_purge_data(self):
pass
def test_run(self):
pass
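The one implemented test leans on two unittest.mock features: patching a method so it returns an awaitable, and asserting the exact call sequence afterwards. A stripped-down illustration of the assertion pattern, independent of FogLAMP:

from unittest.mock import MagicMock, call

stats = MagicMock()
stats.update("PURGED", 1)
stats.update("UNSNPURGED", 2)
# Passes only if both calls were made with these arguments, in this order.
stats.update.assert_has_calls([call("PURGED", 1), call("UNSNPURGED", 2)])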
|
|
a645f5c2991bcdf3ff8581a9fa3afe456004320f
|
casepro/cases/migrations/0043_case_user_assignee.py
|
casepro/cases/migrations/0043_case_user_assignee.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('cases', '0042_auto_20160805_1003'),
]
operations = [
migrations.AddField(
model_name='case',
name='user_assignee',
field=models.ForeignKey(related_name='cases', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, help_text='The (optional) user that this case is assigned to', null=True),
),
]
|
Add migration for user_assignee Case field
|
Add migration for user_assignee Case field
|
Python
|
bsd-3-clause
|
praekelt/casepro,praekelt/casepro,rapidpro/casepro,xkmato/casepro,praekelt/casepro,rapidpro/casepro,xkmato/casepro,rapidpro/casepro
|
Add migration for user_assignee Case field
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('cases', '0042_auto_20160805_1003'),
]
operations = [
migrations.AddField(
model_name='case',
name='user_assignee',
field=models.ForeignKey(related_name='cases', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, help_text='The (optional) user that this case is assigned to', null=True),
),
]
|
<commit_before><commit_msg>Add migration for user_assignee Case field<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('cases', '0042_auto_20160805_1003'),
]
operations = [
migrations.AddField(
model_name='case',
name='user_assignee',
field=models.ForeignKey(related_name='cases', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, help_text='The (optional) user that this case is assigned to', null=True),
),
]
|
Add migration for user_assignee Case field# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('cases', '0042_auto_20160805_1003'),
]
operations = [
migrations.AddField(
model_name='case',
name='user_assignee',
field=models.ForeignKey(related_name='cases', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, help_text='The (optional) user that this case is assigned to', null=True),
),
]
|
<commit_before><commit_msg>Add migration for user_assignee Case field<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('cases', '0042_auto_20160805_1003'),
]
operations = [
migrations.AddField(
model_name='case',
name='user_assignee',
field=models.ForeignKey(related_name='cases', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, help_text='The (optional) user that this case is assigned to', null=True),
),
]
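A migration like this is what makemigrations would produce for a matching field on the Case model. The declaration below is an inferred sketch of that field in current Django syntax, not a copy of casepro's models.py:

from django.conf import settings
from django.db import models

class Case(models.Model):
    # ... other fields omitted ...
    user_assignee = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        related_name='cases',
        null=True,
        on_delete=models.SET_NULL,
        help_text='The (optional) user that this case is assigned to',
    )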
|
|
81ed84afcc4a2cb38abaf48ca06c58b3352a6008
|
scheduler/migrations/0039_delete_workdone.py
|
scheduler/migrations/0039_delete_workdone.py
|
# Generated by Django 2.2.3 on 2022-03-12 09:54
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('scheduler', '0038_protect_deletion'),
]
operations = [
migrations.DeleteModel(
name='WorkDone',
),
]
|
Add missing migration for model WorkDone
|
Add missing migration for model WorkDone
|
Python
|
agpl-3.0
|
coders4help/volunteer_planner,pitpalme/volunteer_planner,pitpalme/volunteer_planner,volunteer-planner/volunteer_planner,volunteer-planner/volunteer_planner,christophmeissner/volunteer_planner,coders4help/volunteer_planner,coders4help/volunteer_planner,christophmeissner/volunteer_planner,coders4help/volunteer_planner,volunteer-planner/volunteer_planner,pitpalme/volunteer_planner,christophmeissner/volunteer_planner,pitpalme/volunteer_planner,volunteer-planner/volunteer_planner,christophmeissner/volunteer_planner
|
Add missing migration for model WorkDone
|
# Generated by Django 2.2.3 on 2022-03-12 09:54
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('scheduler', '0038_protect_deletion'),
]
operations = [
migrations.DeleteModel(
name='WorkDone',
),
]
|
<commit_before><commit_msg>Add missing migration for model WorkDone<commit_after>
|
# Generated by Django 2.2.3 on 2022-03-12 09:54
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('scheduler', '0038_protect_deletion'),
]
operations = [
migrations.DeleteModel(
name='WorkDone',
),
]
|
Add missing migration for model WorkDone# Generated by Django 2.2.3 on 2022-03-12 09:54
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('scheduler', '0038_protect_deletion'),
]
operations = [
migrations.DeleteModel(
name='WorkDone',
),
]
|
<commit_before><commit_msg>Add missing migration for model WorkDone<commit_after># Generated by Django 2.2.3 on 2022-03-12 09:54
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('scheduler', '0038_protect_deletion'),
]
operations = [
migrations.DeleteModel(
name='WorkDone',
),
]
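The commit message describes a migration that had gone missing. One common guard against that situation is a test that asks Django whether models and migration files still agree; a sketch using the standard management API, assuming project settings are configured for the test run:

from django.core.management import call_command
from django.test import TestCase

class MigrationsInSyncTest(TestCase):
    def test_no_missing_migrations(self):
        # Fails (raises) if any app has model changes not covered by a migration.
        call_command("makemigrations", "--check", "--dry-run", verbosity=0)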
|
|
95bf6837719533b5621faf57753bf99778995e5f
|
contrib/conflict_resolution/resolve_interactively.py
|
contrib/conflict_resolution/resolve_interactively.py
|
#!/usr/bin/env python3
"""Ask user to resolve a vdirsyncer sync conflict interactively.
Needs a way to ask the user.
The use of https://apps.kde.org/kdialog/ for GNU/Linux is hardcoded.
Depends on python>3.5 and KDialog.
Usage:
Ensure the file is executable and use it in the vdirsyncer.conf file, e.g.
conflict_resolution = ["command", "/home/bern/vdirsyncer/resolve_interactively.py"]
This file is Free Software under the following license:
SPDX-License-Identifier: Apache-2.0
SPDX-FileCopyrightText: 2021 Intevation GmbH <https://intevation.de>
Author: <bernhard.reiter@intevation.de>
"""
from pathlib import Path
import re
import subprocess
import sys
KDIALOG = "/usr/bin/kdialog"
SUMMARY_PATTERN = re.compile("^(SUMMARY:.*)$", re.MULTILINE)
def get_summary(icalendar_text:str):
"""Get the first SUMMARY: line from an iCalendar text.
Do not care about the line being continued.
"""
match = re.search(SUMMARY_PATTERN, icalendar_text)
return match[1]
def main(ical1_filename, ical2_filename):
ical1 = ical1_filename.read_text()
ical2 = ical2_filename.read_text()
additional_args = ["--yes-label", "take first"] # return code == 0
additional_args += ["--no-label", "take second"] # return code == 1
additional_args += ["--cancel-label", "do not resolve"] # return code == 2
r = subprocess.run(args = [
KDIALOG,
"--warningyesnocancel",
"There was a sync conflict, do you prefer the first entry: \n" +
get_summary(ical1) + "...\n(full contents: " + str(ical1_filename) +
" )\n\nor the second entry: \n" +
get_summary(ical2) + "...\n(full contents: " + str(ical2_filename) +
" )?"
] + additional_args)
if r.returncode == 2:
# cancel was pressed
return # shall lead to items not changed, because not copied
if r.returncode == 0:
# we want to take the first item, so overwrite the second
ical2_filename.write_text(ical1)
else: # r.returncode == 1, we want the second item, so overwrite the first
ical1_filename.write_text(ical2)
if len(sys.argv) != 3:
sys.stdout.write(__doc__)
else:
main(Path(sys.argv[1]), Path(sys.argv[2]))
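get_summary() deliberately looks only at the first SUMMARY: line and ignores folded continuation lines. A quick, self-contained illustration on a minimal iCalendar fragment:

import re

SUMMARY_PATTERN = re.compile("^(SUMMARY:.*)$", re.MULTILINE)

ics = "BEGIN:VEVENT\nSUMMARY:Dentist appointment\nDTSTART:20210401T090000\nEND:VEVENT"
match = re.search(SUMMARY_PATTERN, ics)
print(match[1])  # SUMMARY:Dentist appointment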
|
Add contrib script to resolve conflicts
|
Add contrib script to resolve conflicts
Asks the user when two iCalendar objects conflict during a sync,
which one to take.
|
Python
|
mit
|
untitaker/vdirsyncer,untitaker/vdirsyncer,untitaker/vdirsyncer
|
Add contrib script to resolve conflicts
Asks the user when two iCalendar objects conflict during a sync,
which one to take.
|
#!/usr/bin/env python3
"""Ask user to resolve a vdirsyncer sync conflict interactively.
Needs a way to ask the user.
The use of https://apps.kde.org/kdialog/ for GNU/Linux is hardcoded.
Depends on python>3.5 and KDialog.
Usage:
Ensure the file is executable and use it in the vdirsyncer.conf file, e.g.
conflict_resolution = ["command", "/home/bern/vdirsyncer/resolve_interactively.py"]
This file is Free Software under the following license:
SPDX-License-Identifier: Apache-2.0
SPDX-FileCopyrightText: 2021 Intevation GmbH <https://intevation.de>
Author: <bernhard.reiter@intevation.de>
"""
from pathlib import Path
import re
import subprocess
import sys
KDIALOG = "/usr/bin/kdialog"
SUMMARY_PATTERN = re.compile("^(SUMMARY:.*)$", re.MULTILINE)
def get_summary(icalendar_text:str):
"""Get the first SUMMARY: line from an iCalendar text.
Do not care about the line being continued.
"""
match = re.search(SUMMARY_PATTERN, icalendar_text)
return match[1]
def main(ical1_filename, ical2_filename):
ical1 = ical1_filename.read_text()
ical2 = ical2_filename.read_text()
additional_args = ["--yes-label", "take first"] # return code == 0
additional_args += ["--no-label", "take second"] # return code == 1
additional_args += ["--cancel-label", "do not resolve"] # return code == 2
r = subprocess.run(args = [
KDIALOG,
"--warningyesnocancel",
"There was a sync conflict, do you prefer the first entry: \n" +
get_summary(ical1) + "...\n(full contents: " + str(ical1_filename) +
" )\n\nor the second entry: \n" +
get_summary(ical2) + "...\n(full contents: " + str(ical2_filename) +
" )?"
] + additional_args)
if r.returncode == 2:
# cancel was pressed
return # shall lead to items not changed, because not copied
if r.returncode == 0:
# we want to take the first item, so overwrite the second
ical2_filename.write_text(ical1)
else: # r.returncode == 1, we want the second item, so overwrite the first
ical1_filename.write_text(ical2)
if len(sys.argv) != 3:
sys.stdout.write(__doc__)
else:
main(Path(sys.argv[1]), Path(sys.argv[2]))
|
<commit_before><commit_msg>Add contrib script to resolve conflicts
Asks the user when two iCalendar objects conflict during a sync,
which one to take.<commit_after>
|
#!/usr/bin/env python3
"""Ask user to resolve a vdirsyncer sync conflict interactively.
Needs a way to ask the user.
The use of https://apps.kde.org/kdialog/ for GNU/Linux is hardcoded.
Depends on python>3.5 and KDialog.
Usage:
Ensure the file is executable and use it in the vdirsyncer.conf file, e.g.
conflict_resolution = ["command", "/home/bern/vdirsyncer/resolve_interactively.py"]
This file is Free Software under the following license:
SPDX-License-Identifier: Apache-2.0
SPDX-FileCopyrightText: 2021 Intevation GmbH <https://intevation.de>
Author: <bernhard.reiter@intevation.de>
"""
from pathlib import Path
import re
import subprocess
import sys
KDIALOG = "/usr/bin/kdialog"
SUMMARY_PATTERN = re.compile("^(SUMMARY:.*)$", re.MULTILINE)
def get_summary(icalendar_text:str):
"""Get the first SUMMARY: line from an iCalendar text.
Do not care about the line being continued.
"""
match = re.search(SUMMARY_PATTERN, icalendar_text)
return match[1]
def main(ical1_filename, ical2_filename):
ical1 = ical1_filename.read_text()
ical2 = ical2_filename.read_text()
additional_args = ["--yes-label", "take first"] # return code == 0
additional_args += ["--no-label", "take second"] # return code == 1
additional_args += ["--cancel-label", "do not resolve"] # return code == 2
r = subprocess.run(args = [
KDIALOG,
"--warningyesnocancel",
"There was a sync conflict, do you prefer the first entry: \n" +
get_summary(ical1) + "...\n(full contents: " + str(ical1_filename) +
" )\n\nor the second entry: \n" +
get_summary(ical2) + "...\n(full contents: " + str(ical2_filename) +
" )?"
] + additional_args)
if r.returncode == 2:
# cancel was pressed
return # shall lead to items not changed, because not copied
if r.returncode == 0:
# we want to take the first item, so overwrite the second
ical2_filename.write_text(ical1)
else: # r.returncode == 1, we want the second item, so overwrite the first
ical1_filename.write_text(ical2)
if len(sys.argv) != 3:
sys.stdout.write(__doc__)
else:
main(Path(sys.argv[1]), Path(sys.argv[2]))
|
Add contrib script to resolve conflicts
Asks the user when two iCalendar objects conflict during a sync,
which one to take.#!/usr/bin/env python3
"""Ask user to resolve a vdirsyncer sync conflict interactively.
Needs a way to ask the user.
The use of https://apps.kde.org/kdialog/ for GNU/Linux is hardcoded.
Depends on python>3.5 and KDialog.
Usage:
Ensure the file is executable and use it in the vdirsyncer.conf file, e.g.
conflict_resolution = ["command", "/home/bern/vdirsyncer/resolve_interactively.py"]
This file is Free Software under the following license:
SPDX-License-Identifier: Apache-2.0
SPDX-FileCopyrightText: 2021 Intevation GmbH <https://intevation.de>
Author: <bernhard.reiter@intevation.de>
"""
from pathlib import Path
import re
import subprocess
import sys
KDIALOG = "/usr/bin/kdialog"
SUMMARY_PATTERN = re.compile("^(SUMMARY:.*)$", re.MULTILINE)
def get_summary(icalendar_text:str):
"""Get the first SUMMARY: line from an iCalendar text.
Do not care about the line being continued.
"""
match = re.search(SUMMARY_PATTERN, icalendar_text)
return match[1]
def main(ical1_filename, ical2_filename):
ical1 = ical1_filename.read_text()
ical2 = ical2_filename.read_text()
additional_args = ["--yes-label", "take first"] # return code == 0
additional_args += ["--no-label", "take second"] # return code == 1
additional_args += ["--cancel-label", "do not resolve"] # return code == 2
r = subprocess.run(args = [
KDIALOG,
"--warningyesnocancel",
"There was a sync conflict, do you prefer the first entry: \n" +
get_summary(ical1) + "...\n(full contents: " + str(ical1_filename) +
" )\n\nor the second entry: \n" +
get_summary(ical2) + "...\n(full contents: " + str(ical2_filename) +
" )?"
] + additional_args)
if r.returncode == 2:
# cancel was pressed
return # shall lead to items not changed, because not copied
if r.returncode == 0:
# we want to take the first item, so overwrite the second
ical2_filename.write_text(ical1)
else: # r.returncode == 1, we want the second item, so overwrite the first
ical1_filename.write_text(ical2)
if len(sys.argv) != 3:
sys.stdout.write(__doc__)
else:
main(Path(sys.argv[1]), Path(sys.argv[2]))
|
<commit_before><commit_msg>Add contrib script to resolve conflicts
Asks the user when two iCalendar objects conflict during a sync,
which one to take.<commit_after>#!/usr/bin/env python3
"""Ask user to resolve a vdirsyncer sync conflict interactively.
Needs a way to ask the user.
The use of https://apps.kde.org/kdialog/ for GNU/Linux is hardcoded.
Depends on python>3.5 and KDialog.
Usage:
Ensure the file is executable and use it in the vdirsyncer.conf file, e.g.
conflict_resolution = ["command", "/home/bern/vdirsyncer/resolve_interactively.py"]
This file is Free Software under the following license:
SPDX-License-Identifier: Apache-2.0
SPDX-FileCopyrightText: 2021 Intevation GmbH <https://intevation.de>
Author: <bernhard.reiter@intevation.de>
"""
from pathlib import Path
import re
import subprocess
import sys
KDIALOG = "/usr/bin/kdialog"
SUMMARY_PATTERN = re.compile("^(SUMMARY:.*)$", re.MULTILINE)
def get_summary(icalendar_text:str):
"""Get the first SUMMARY: line from an iCalendar text.
Do not care about the line being continued.
"""
match = re.search(SUMMARY_PATTERN, icalendar_text)
return match[1]
def main(ical1_filename, ical2_filename):
ical1 = ical1_filename.read_text()
ical2 = ical2_filename.read_text()
additional_args = ["--yes-label", "take first"] # return code == 0
additional_args += ["--no-label", "take second"] # return code == 1
additional_args += ["--cancel-label", "do not resolve"] # return code == 2
r = subprocess.run(args = [
KDIALOG,
"--warningyesnocancel",
"There was a sync conflict, do you prefer the first entry: \n" +
get_summary(ical1) + "...\n(full contents: " + str(ical1_filename) +
" )\n\nor the second entry: \n" +
get_summary(ical2) + "...\n(full contents: " + str(ical2_filename) +
" )?"
] + additional_args)
if r.returncode == 2:
# cancel was pressed
return # shall lead to items not changed, because not copied
if r.returncode == 0:
# we want to take the first item, so overwrite the second
ical2_filename.write_text(ical1)
else: # r.returncode == 1, we want the second item, so overwrite the first
ical1_filename.write_text(ical2)
if len(sys.argv) != 3:
sys.stdout.write(__doc__)
else:
main(Path(sys.argv[1]), Path(sys.argv[2]))
|
|
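A minimal sketch of how vdirsyncer hands two conflicting items to a conflict_resolution hook such as the script above: the script path, temporary directory and event summaries below are invented for illustration, and KDialog must be installed for the dialog to actually appear.

# Hypothetical smoke test for the resolver hook (assumed script path and summaries).
import subprocess
import tempfile
from pathlib import Path

ICS_TEMPLATE = """BEGIN:VCALENDAR
BEGIN:VEVENT
SUMMARY:{summary}
END:VEVENT
END:VCALENDAR
"""

with tempfile.TemporaryDirectory() as tmp:
    first = Path(tmp) / "conflict_a.ics"
    second = Path(tmp) / "conflict_b.ics"
    first.write_text(ICS_TEMPLATE.format(summary="Dentist 09:00"))
    second.write_text(ICS_TEMPLATE.format(summary="Dentist 10:30"))
    # vdirsyncer passes the two conflicting item paths as the two command arguments.
    subprocess.run(["./resolve_interactively.py", str(first), str(second)])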
6b5774d671e311e7d3b107248937285ace0c4eb4
|
Luna/Model.py
|
Luna/Model.py
|
#!/usr/bin/env python
#This is free and unencumbered software released into the public domain.
#
#Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
#software, either in source code form or as a compiled binary, for any purpose,
#commercial or non-commercial, and by any means.
#
#In jurisdictions that recognize copyright laws, the author or authors of this
#software dedicate any and all copyright interest in the software to the public
#domain. We make this dedication for the benefit of the public at large and to
#the detriment of our heirs and successors. We intend this dedication to be an
#overt act of relinquishment in perpetuity of all present and future rights to
#this software under copyright law.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
#ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
#WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#For more information, please refer to <https://unlicense.org/>
"""
Provides a system for the model part of the model-view-presenter paradigm.
For a safe model system, every model should follow the following rules:
- All data must be private inside a class (with field names starting with two
underscores).
- All functions that change the data must have the ``setter`` decorator.
"""
import weakref #To automatically remove listeners and signallers if their class instances are removed.
__listeners = weakref.WeakKeyDictionary()
"""
For each function, a set of functions that need to be called whenever the method
is called.
Entries of the dictionary will automatically get garbage collected once all
strong references to their class instances are removed. Entries of each listener
set will automatically get garbage collected once all strong references to their
class instances are removed.
"""
def setter(setterFunction):
"""
.. function:: setter(setterFunction)
Decorator indicating that a function can be registered with listeners.
This decorator should be used for any method that changes the data in the
model.
:param setterFunction: The function to allow registering listeners with.
:return: A new function that calls all listeners after calling the setter.
"""
global __listeners
if setterFunction not in __listeners: #Make an entry to track listeners of this function.
__listeners[setterFunction] = weakref.WeakSet()
def newSetter(*args,**kwargs):
setterFunction(*args,**kwargs)
for listener in __listeners[setterFunction]: #Call all listeners.
listener()
return newSetter
|
Add model with observer pattern
|
Add model with observer pattern
Intended for classes that contain model data, this new module will
define some structural things to implement a model-view-presenter
pattern.
|
Python
|
cc0-1.0
|
Ghostkeeper/Luna
|
Add model with observer pattern
Intended for classes that contain model data, this new module will
define some structural things to implement a model-view-presenter
pattern.
|
#!/usr/bin/env python
#This is free and unencumbered software released into the public domain.
#
#Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
#software, either in source code form or as a compiled binary, for any purpose,
#commercial or non-commercial, and by any means.
#
#In jurisdictions that recognize copyright laws, the author or authors of this
#software dedicate any and all copyright interest in the software to the public
#domain. We make this dedication for the benefit of the public at large and to
#the detriment of our heirs and successors. We intend this dedication to be an
#overt act of relinquishment in perpetuity of all present and future rights to
#this software under copyright law.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
#ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
#WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#For more information, please refer to <https://unlicense.org/>
"""
Provides a system for the model part of the model-view-presenter paradigm.
For a safe model system, every model should follow the following rules:
- All data must be private inside a class (with field names starting with two
underscores).
- All functions that change the data must have the ``setter`` decorator.
"""
import weakref #To automatically remove listeners and signallers if their class instances are removed.
__listeners = weakref.WeakKeyDictionary()
"""
For each function, a set of functions that need to be called whenever the method
is called.
Entries of the dictionary will automatically get garbage collected once all
strong references to their class instances are removed. Entries of each listener
set will automatically get garbage collected once all strong references to their
class instances are removed.
"""
def setter(setterFunction):
"""
.. function:: setter(setterFunction)
Decorator indicating that a function can be registered with listeners.
This decorator should be used for any method that changes the data in the
model.
:param setterFunction: The function to allow registering listeners with.
:return: A new function that calls all listeners after calling the setter.
"""
global __listeners
if setterFunction not in __listeners: #Make an entry to track listeners of this function.
__listeners[setterFunction] = weakref.WeakSet()
def newSetter(*args,**kwargs):
setterFunction(*args,**kwargs)
for listener in __listeners[setterFunction]: #Call all listeners.
listener()
return newSetter
|
<commit_before><commit_msg>Add model with observer pattern
Intended for classes that contain model data, this new module will
define some structural things to implement a model-view-presenter
pattern.<commit_after>
|
#!/usr/bin/env python
#This is free and unencumbered software released into the public domain.
#
#Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
#software, either in source code form or as a compiled binary, for any purpose,
#commercial or non-commercial, and by any means.
#
#In jurisdictions that recognize copyright laws, the author or authors of this
#software dedicate any and all copyright interest in the software to the public
#domain. We make this dedication for the benefit of the public at large and to
#the detriment of our heirs and successors. We intend this dedication to be an
#overt act of relinquishment in perpetuity of all present and future rights to
#this software under copyright law.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
#ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
#WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#For more information, please refer to <https://unlicense.org/>
"""
Provides a system for the model part of the model-view-presenter paradigm.
For a safe model system, every model should follow the following rules:
- All data must be private inside a class (with field names starting with two
underscores).
- All functions that change the data must have the ``setter`` decorator.
"""
import weakref #To automatically remove listeners and signallers if their class instances are removed.
__listeners = weakref.WeakKeyDictionary()
"""
For each function, a set of functions that need to be called whenever the method
is called.
Entries of the dictionary will automatically get garbage collected once all
strong references to their class instances are removed. Entries of each listener
set will automatically get garbage collected once all strong references to their
class instances are removed.
"""
def setter(setterFunction):
"""
.. function:: setter(setterFunction)
Decorator indicating that a function can be registered with listeners.
This decorator should be used for any method that changes the data in the
model.
:param setterFunction: The function to allow registering listeners with.
:return: A new function that calls all listeners after calling the setter.
"""
global __listeners
if setterFunction not in __listeners: #Make an entry to track listeners of this function.
__listeners[setterFunction] = weakref.WeakSet()
def newSetter(*args,**kwargs):
setterFunction(*args,**kwargs)
for listener in __listeners[setterFunction]: #Call all listeners.
listener()
return newSetter
|
Add model with observer pattern
Intended for classes that contain model data, this new module will
define some structural things to implement a model-view-presenter
pattern.#!/usr/bin/env python
#This is free and unencumbered software released into the public domain.
#
#Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
#software, either in source code form or as a compiled binary, for any purpose,
#commercial or non-commercial, and by any means.
#
#In jurisdictions that recognize copyright laws, the author or authors of this
#software dedicate any and all copyright interest in the software to the public
#domain. We make this dedication for the benefit of the public at large and to
#the detriment of our heirs and successors. We intend this dedication to be an
#overt act of relinquishment in perpetuity of all present and future rights to
#this software under copyright law.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
#ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
#WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#For more information, please refer to <https://unlicense.org/>
"""
Provides a system for the model part of the model-view-presenter paradigm.
For a safe model system, every model should follow the following rules:
- All data must be private inside a class (with field names starting with two
underscores).
- All functions that change the data must have the ``setter`` decorator.
"""
import weakref #To automatically remove listeners and signallers if their class instances are removed.
__listeners = weakref.WeakKeyDictionary()
"""
For each function, a set of functions that need to be called whenever the method
is called.
Entries of the dictionary will automatically get garbage collected once all
strong references to their class instances are removed. Entries of each listener
set will automatically get garbage collected once all strong references to their
class instances are removed.
"""
def setter(setterFunction):
"""
.. function:: setter(setterFunction)
Decorator indicating that a function can be registered with listeners.
This decorator should be used for any method that changes the data in the
model.
:param setterFunction: The function to allow registering listeners with.
:return: A new function that calls all listeners after calling the setter.
"""
global __listeners
if setterFunction not in __listeners: #Make an entry to track listeners of this function.
__listeners[setterFunction] = weakref.WeakSet()
def newSetter(*args,**kwargs):
setterFunction(*args,**kwargs)
for listener in __listeners[setterFunction]: #Call all listeners.
listener()
return newSetter
|
<commit_before><commit_msg>Add model with observer pattern
Intended for classes that contain model data, this new module will
define some structural things to implement a model-view-presenter
pattern.<commit_after>#!/usr/bin/env python
#This is free and unencumbered software released into the public domain.
#
#Anyone is free to copy, modify, publish, use, compile, sell, or distribute this
#software, either in source code form or as a compiled binary, for any purpose,
#commercial or non-commercial, and by any means.
#
#In jurisdictions that recognize copyright laws, the author or authors of this
#software dedicate any and all copyright interest in the software to the public
#domain. We make this dedication for the benefit of the public at large and to
#the detriment of our heirs and successors. We intend this dedication to be an
#overt act of relinquishment in perpetuity of all present and future rights to
#this software under copyright law.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
#ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
#WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
#For more information, please refer to <https://unlicense.org/>
"""
Provides a system for the model part of the model-view-presenter paradigm.
For a safe model system, every model should follow the following rules:
- All data must be private inside a class (with field names starting with two
underscores).
- All functions that change the data must have the ``setter`` decorator.
"""
import weakref #To automatically remove listeners and signallers if their class instances are removed.
__listeners = weakref.WeakKeyDictionary()
"""
For each function, a set of functions that need to be called whenever the method
is called.
Entries of the dictionary will automatically get garbage collected once all
strong references to their class instances are removed. Entries of each listener
set will automatically get garbage collected once all strong references to their
class instances are removed.
"""
def setter(setterFunction):
"""
.. function:: setter(setterFunction)
Decorator indicating that a function can be registered with listeners.
This decorator should be used for any method that changes the data in the
model.
:param setterFunction: The function to allow registering listeners with.
:return: A new function that calls all listeners after calling the setter.
"""
global __listeners
if setterFunction not in __listeners: #Make an entry to track listeners of this function.
__listeners[setterFunction] = weakref.WeakSet()
def newSetter(*args,**kwargs):
setterFunction(*args,**kwargs)
for listener in __listeners[setterFunction]: #Call all listeners.
listener()
return newSetter
|
|
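A minimal sketch of how a model class might apply the setter decorator above; the Document class, its field and the import path are assumptions, and since the listener-registration side is not part of this module yet, only the setter side is exercised.

# Hypothetical model class using the decorator; camelCase follows the module's style.
from Luna import Model

class Document:
    def __init__(self):
        self.__title = ""  # private data, as required by the module's model rules

    @Model.setter
    def setTitle(self, title):
        self.__title = title

document = Document()
document.setTitle("Untitled")  # listeners registered for setTitle would be called here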
897ef3903de2b7b67780409c3280700aabe14c0f
|
search/binary_search/python/recursive_binary_search.py
|
search/binary_search/python/recursive_binary_search.py
|
# recursive binary search
# return, if present, the position of the searched element
# returns -1 if the element is not present
def recursive_binary_search(n, e, i, j):
if i > j:
return -1
half = (i+j)//2
if e == n[half]:
return half
elif e < n[half]:
return recursive_binary_search(n, e, i, half-1)
elif e > n[half]:
return recursive_binary_search(n, e, half+1, j)
def test():
a_list = [1,5,7,8,13,17,21,27,33,37,40]
item = 21
start = 0
end = len(a_list)-1
assert recursive_binary_search(a_list, item, start, end) == 6
if __name__ == "__main__":
test()
|
Add recursive binary search in python
|
Add recursive binary search in python
|
Python
|
cc0-1.0
|
ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms
|
Add recursive binary search in python
|
# recursive binary search
# return, if present, the position of the searched element
# returns -1 if the element is not present
def recursive_binary_search(n, e, i, j):
if i > j:
return -1
half = (i+j)//2
if e == n[half]:
return half
elif e < n[half]:
return recursive_binary_search(n, e, i, half-1)
elif e > n[half]:
return recursive_binary_search(n, e, half+1, j)
def test():
a_list = [1,5,7,8,13,17,21,27,33,37,40]
item = 21
start = 0
end = len(a_list)-1
assert recursive_binary_search(a_list, item, start, end) == 6
if __name__ == "__main__":
test()
|
<commit_before><commit_msg>Add recursive binary search in python<commit_after>
|
# recursive binary search
# return, if present, the position of the searched element
# returns -1 if the element is not present
def recursive_binary_search(n, e, i, j):
if i > j:
return -1
half = (i+j)//2
if e == n[half]:
return half
elif e < n[half]:
return recursive_binary_search(n, e, i, half-1)
elif e > n[half]:
return recursive_binary_search(n, e, half+1, j)
def test():
a_list = [1,5,7,8,13,17,21,27,33,37,40]
item = 21
start = 0
end = len(a_list)-1
assert recursive_binary_search(a_list, item, start, end) == 6
if __name__ == "__main__":
test()
|
Add recursive binary search in python# recursive binary search
# return, if present, the position of the searched element
# returns -1 if the element is not present
def recursive_binary_search(n, e, i, j):
if i > j:
return -1
half = (i+j)//2
if e == n[half]:
return half
elif e < n[half]:
return recursive_binary_search(n, e, i, half-1)
elif e > n[half]:
return recursive_binary_search(n, e, half+1, j)
def test():
a_list = [1,5,7,8,13,17,21,27,33,37,40]
item = 21
start = 0
end = len(a_list)-1
assert recursive_binary_search(a_list, item, start, end) == 6
if __name__ == "__main__":
test()
|
<commit_before><commit_msg>Add recursive binary search in python<commit_after># recursive binary search
# return, if present, the position of the searched element
# returns -1 if the element is not present
def recursive_binary_search(n, e, i, j):
if i > j:
return -1
half = (i+j)//2
if e == n[half]:
return half
elif e < n[half]:
return recursive_binary_search(n, e, i, half-1)
elif e > n[half]:
return recursive_binary_search(n, e, half+1, j)
def test():
a_list = [1,5,7,8,13,17,21,27,33,37,40]
item = 21
start = 0
end = len(a_list)-1
assert recursive_binary_search(a_list, item, start, end) == 6
if __name__ == "__main__":
test()
|
|
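A short usage sketch for the function above, assuming recursive_binary_search is defined in the same file; bisect_left from the standard library is included only as a cross-check and is not part of the commit.

# Cross-check the recursive search on a present and a missing element.
from bisect import bisect_left

a_list = [1, 5, 7, 8, 13, 17, 21, 27, 33, 37, 40]
assert recursive_binary_search(a_list, 21, 0, len(a_list) - 1) == 6
assert recursive_binary_search(a_list, 2, 0, len(a_list) - 1) == -1  # not present
assert bisect_left(a_list, 21) == 6  # same index via the standard library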
67e62a9ee88ff432f641e5c6ea9bc2244264f7ad
|
seahub/api2/authentication.py
|
seahub/api2/authentication.py
|
from rest_framework.authentication import BaseAuthentication
from models import Token
from seahub.base.accounts import User
class TokenAuthentication(BaseAuthentication):
"""
Simple token based authentication.
Clients should authenticate by passing the token key in the "Authorization"
HTTP header, prepended with the string "Token ". For example:
Authorization: Token 401f7ac837da42b97f613d789819ff93537bee6a
"""
model = Token
"""
A custom token model may be used, but must have the following properties.
* key -- The string identifying the token
* user -- The user to which the token belongs
"""
def authenticate(self, request):
auth = request.META.get('HTTP_AUTHORIZATION', '').split()
if len(auth) == 2 and auth[0].lower() == "token":
key = auth[1]
try:
token = self.model.objects.get(key=key)
except self.model.DoesNotExist:
return None
user = User.objects.get(email=token.user)
if user.is_active:
return (user, token)
|
from rest_framework.authentication import BaseAuthentication
from models import Token
from seahub.base.accounts import User
class TokenAuthentication(BaseAuthentication):
"""
Simple token based authentication.
Clients should authenticate by passing the token key in the "Authorization"
HTTP header, prepended with the string "Token ". For example:
Authorization: Token 401f7ac837da42b97f613d789819ff93537bee6a
"""
model = Token
"""
A custom token model may be used, but must have the following properties.
* key -- The string identifying the token
* user -- The user to which the token belongs
"""
def authenticate(self, request):
auth = request.META.get('HTTP_AUTHORIZATION', '').split()
if len(auth) == 2 and auth[0].lower() == "token":
key = auth[1]
try:
token = self.model.objects.get(key=key)
except self.model.DoesNotExist:
return None
try:
user = User.objects.get(email=token.user)
except User.DoesNotExist:
return None
if user.is_active:
return (user, token)
|
Fix for if token is valid format, but user with that token does not exist
|
Fix for if token is valid format, but user with that token does not exist
|
Python
|
apache-2.0
|
Chilledheart/seahub,madflow/seahub,madflow/seahub,madflow/seahub,madflow/seahub,madflow/seahub,Chilledheart/seahub,miurahr/seahub,cloudcopy/seahub,Chilledheart/seahub,miurahr/seahub,cloudcopy/seahub,Chilledheart/seahub,cloudcopy/seahub,miurahr/seahub,cloudcopy/seahub,Chilledheart/seahub,miurahr/seahub
|
from rest_framework.authentication import BaseAuthentication
from models import Token
from seahub.base.accounts import User
class TokenAuthentication(BaseAuthentication):
"""
Simple token based authentication.
Clients should authenticate by passing the token key in the "Authorization"
HTTP header, prepended with the string "Token ". For example:
Authorization: Token 401f7ac837da42b97f613d789819ff93537bee6a
"""
model = Token
"""
A custom token model may be used, but must have the following properties.
* key -- The string identifying the token
* user -- The user to which the token belongs
"""
def authenticate(self, request):
auth = request.META.get('HTTP_AUTHORIZATION', '').split()
if len(auth) == 2 and auth[0].lower() == "token":
key = auth[1]
try:
token = self.model.objects.get(key=key)
except self.model.DoesNotExist:
return None
user = User.objects.get(email=token.user)
if user.is_active:
return (user, token)
Fix for if token is valid format, but user with that token does not exist
|
from rest_framework.authentication import BaseAuthentication
from models import Token
from seahub.base.accounts import User
class TokenAuthentication(BaseAuthentication):
"""
Simple token based authentication.
Clients should authenticate by passing the token key in the "Authorization"
HTTP header, prepended with the string "Token ". For example:
Authorization: Token 401f7ac837da42b97f613d789819ff93537bee6a
"""
model = Token
"""
A custom token model may be used, but must have the following properties.
* key -- The string identifying the token
* user -- The user to which the token belongs
"""
def authenticate(self, request):
auth = request.META.get('HTTP_AUTHORIZATION', '').split()
if len(auth) == 2 and auth[0].lower() == "token":
key = auth[1]
try:
token = self.model.objects.get(key=key)
except self.model.DoesNotExist:
return None
try:
user = User.objects.get(email=token.user)
except User.DoesNotExist:
return None
if user.is_active:
return (user, token)
|
<commit_before>from rest_framework.authentication import BaseAuthentication
from models import Token
from seahub.base.accounts import User
class TokenAuthentication(BaseAuthentication):
"""
Simple token based authentication.
Clients should authenticate by passing the token key in the "Authorization"
HTTP header, prepended with the string "Token ". For example:
Authorization: Token 401f7ac837da42b97f613d789819ff93537bee6a
"""
model = Token
"""
A custom token model may be used, but must have the following properties.
* key -- The string identifying the token
* user -- The user to which the token belongs
"""
def authenticate(self, request):
auth = request.META.get('HTTP_AUTHORIZATION', '').split()
if len(auth) == 2 and auth[0].lower() == "token":
key = auth[1]
try:
token = self.model.objects.get(key=key)
except self.model.DoesNotExist:
return None
user = User.objects.get(email=token.user)
if user.is_active:
return (user, token)
<commit_msg>Fix for if token is valid format, but user with that token does not exist<commit_after>
|
from rest_framework.authentication import BaseAuthentication
from models import Token
from seahub.base.accounts import User
class TokenAuthentication(BaseAuthentication):
"""
Simple token based authentication.
Clients should authenticate by passing the token key in the "Authorization"
HTTP header, prepended with the string "Token ". For example:
Authorization: Token 401f7ac837da42b97f613d789819ff93537bee6a
"""
model = Token
"""
A custom token model may be used, but must have the following properties.
* key -- The string identifying the token
* user -- The user to which the token belongs
"""
def authenticate(self, request):
auth = request.META.get('HTTP_AUTHORIZATION', '').split()
if len(auth) == 2 and auth[0].lower() == "token":
key = auth[1]
try:
token = self.model.objects.get(key=key)
except self.model.DoesNotExist:
return None
try:
user = User.objects.get(email=token.user)
except User.DoesNotExist:
return None
if user.is_active:
return (user, token)
|
from rest_framework.authentication import BaseAuthentication
from models import Token
from seahub.base.accounts import User
class TokenAuthentication(BaseAuthentication):
"""
Simple token based authentication.
Clients should authenticate by passing the token key in the "Authorization"
HTTP header, prepended with the string "Token ". For example:
Authorization: Token 401f7ac837da42b97f613d789819ff93537bee6a
"""
model = Token
"""
A custom token model may be used, but must have the following properties.
* key -- The string identifying the token
* user -- The user to which the token belongs
"""
def authenticate(self, request):
auth = request.META.get('HTTP_AUTHORIZATION', '').split()
if len(auth) == 2 and auth[0].lower() == "token":
key = auth[1]
try:
token = self.model.objects.get(key=key)
except self.model.DoesNotExist:
return None
user = User.objects.get(email=token.user)
if user.is_active:
return (user, token)
Fix for if token is valid format, but user with that token does not existfrom rest_framework.authentication import BaseAuthentication
from models import Token
from seahub.base.accounts import User
class TokenAuthentication(BaseAuthentication):
"""
Simple token based authentication.
Clients should authenticate by passing the token key in the "Authorization"
HTTP header, prepended with the string "Token ". For example:
Authorization: Token 401f7ac837da42b97f613d789819ff93537bee6a
"""
model = Token
"""
A custom token model may be used, but must have the following properties.
* key -- The string identifying the token
* user -- The user to which the token belongs
"""
def authenticate(self, request):
auth = request.META.get('HTTP_AUTHORIZATION', '').split()
if len(auth) == 2 and auth[0].lower() == "token":
key = auth[1]
try:
token = self.model.objects.get(key=key)
except self.model.DoesNotExist:
return None
try:
user = User.objects.get(email=token.user)
except User.DoesNotExist:
return None
if user.is_active:
return (user, token)
|
<commit_before>from rest_framework.authentication import BaseAuthentication
from models import Token
from seahub.base.accounts import User
class TokenAuthentication(BaseAuthentication):
"""
Simple token based authentication.
Clients should authenticate by passing the token key in the "Authorization"
HTTP header, prepended with the string "Token ". For example:
Authorization: Token 401f7ac837da42b97f613d789819ff93537bee6a
"""
model = Token
"""
A custom token model may be used, but must have the following properties.
* key -- The string identifying the token
* user -- The user to which the token belongs
"""
def authenticate(self, request):
auth = request.META.get('HTTP_AUTHORIZATION', '').split()
if len(auth) == 2 and auth[0].lower() == "token":
key = auth[1]
try:
token = self.model.objects.get(key=key)
except self.model.DoesNotExist:
return None
user = User.objects.get(email=token.user)
if user.is_active:
return (user, token)
<commit_msg>Fix for if token is valid format, but user with that token does not exist<commit_after>from rest_framework.authentication import BaseAuthentication
from models import Token
from seahub.base.accounts import User
class TokenAuthentication(BaseAuthentication):
"""
Simple token based authentication.
Clients should authenticate by passing the token key in the "Authorization"
HTTP header, prepended with the string "Token ". For example:
Authorization: Token 401f7ac837da42b97f613d789819ff93537bee6a
"""
model = Token
"""
A custom token model may be used, but must have the following properties.
* key -- The string identifying the token
* user -- The user to which the token belongs
"""
def authenticate(self, request):
auth = request.META.get('HTTP_AUTHORIZATION', '').split()
if len(auth) == 2 and auth[0].lower() == "token":
key = auth[1]
try:
token = self.model.objects.get(key=key)
except self.model.DoesNotExist:
return None
try:
user = User.objects.get(email=token.user)
except User.DoesNotExist:
return None
if user.is_active:
return (user, token)
|
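For context, a class like the one above is wired into Django REST framework through the standard settings hook; the excerpt below is illustrative only (the dotted path is a guess at this repo's layout) and the token value is simply the one from the docstring.

# Hypothetical Django settings excerpt enabling the authenticator above.
REST_FRAMEWORK = {
    "DEFAULT_AUTHENTICATION_CLASSES": (
        "seahub.api2.authentication.TokenAuthentication",
    ),
}
# A client would then send the header described in the class docstring:
#   Authorization: Token 401f7ac837da42b97f613d789819ff93537bee6a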
49416a5bc0ddbf166f3a835ac1f612390d19971b
|
scripts/unittest/script/22-frozen_modules.py
|
scripts/unittest/script/22-frozen_modules.py
|
def unittest(data_path, temp_path):
import ulinalg
from vec import distance
m = ulinalg.ones(1, 10)
d = distance.euclidean((0, 0, 0), (0, 1, 0))
return (abs(1.0-d) < 0.01) and (abs(10.0 - sum(m)) < 0.01)
|
Add unit-test to test frozen modules.
|
Add unit-test to test frozen modules.
|
Python
|
mit
|
openmv/openmv,iabdalkader/openmv,iabdalkader/openmv,openmv/openmv,iabdalkader/openmv,iabdalkader/openmv,kwagyeman/openmv,kwagyeman/openmv,openmv/openmv,kwagyeman/openmv,kwagyeman/openmv,openmv/openmv
|
Add unit-test to test frozen modules.
|
def unittest(data_path, temp_path):
import ulinalg
from vec import distance
m = ulinalg.ones(1, 10)
d = distance.euclidean((0, 0, 0), (0, 1, 0))
return (abs(1.0-d) < 0.01) and (abs(10.0 - sum(m)) < 0.01)
|
<commit_before><commit_msg>Add unit-test to test frozen modules.<commit_after>
|
def unittest(data_path, temp_path):
import ulinalg
from vec import distance
m = ulinalg.ones(1, 10)
d = distance.euclidean((0, 0, 0), (0, 1, 0))
return (abs(1.0-d) < 0.01) and (abs(10.0 - sum(m)) < 0.01)
|
Add unit-test to test frozen modules.def unittest(data_path, temp_path):
import ulinalg
from vec import distance
m = ulinalg.ones(1, 10)
d = distance.euclidean((0, 0, 0), (0, 1, 0))
return (abs(1.0-d) < 0.01) and (abs(10.0 - sum(m)) < 0.01)
|
<commit_before><commit_msg>Add unit-test to test frozen modules.<commit_after>def unittest(data_path, temp_path):
import ulinalg
from vec import distance
m = ulinalg.ones(1, 10)
d = distance.euclidean((0, 0, 0), (0, 1, 0))
return (abs(1.0-d) < 0.01) and (abs(10.0 - sum(m)) < 0.01)
|
|
93751683a1ac19667f2a221ead5142f9bc0a8d32
|
sara_flexbe_states/src/sara_flexbe_states/WonderlandGetPersonByRecognitionId.py
|
sara_flexbe_states/src/sara_flexbe_states/WonderlandGetPersonByRecognitionId.py
|
#!/usr/bin/env python
# encoding=utf8
import json
import requests
from flexbe_core import EventState
from rospy import logerr, loginfo
from sara_msgs.msg import Entity
"""
Created on 15/05/2018
@author: Lucas Maurice
"""
class WonderlandGetPersonByRecognitionId(EventState):
'''
Find a person by ID.
># id int Recognition name of the object
#> entity sara_msgs/Entity
<= done return when one entity exists
<= none return when no entity exists
<= error return when error reading data
'''
def __init__(self):
# See example_state.py for basic explanations.
super(WonderlandGetPersonByRecognitionId, self).__init__(outcomes=['done', 'none', 'error'],
input_keys=['id'], output_keys=['entity'])
def execute(self, userdata):
# Generate URL to contact
url = "http://wonderland:8000/api/people/?peopleRecognitionId=" + str(userdata.id)
# try the request
try:
response = requests.get(url)
except requests.exceptions.RequestException as e:
logerr(e)
return 'error'
# parse parameter json data
data = json.loads(response.content)
loginfo(data)
if 'peopleId' in data:
userdata.entity = self.generate_entity(data)
return 'done'
else:
return 'none'
@staticmethod
def generate_entity(data):
entity = Entity()
entity.wonderlandId = data['peopleId']
entity.face.id = data['peopleRecognitionId']
if 'peopleColor' in data and data['peopleColor'] is not None:
entity.color = data['peopleColor'].encode('ascii', 'ignore')
if 'peopleName' in data:
entity.aliases.append(data['peopleName'].encode('ascii', 'ignore'))
if 'peoplePose' in data and data['peoplePose'] is not None:
entity.pose = data['peoplePose'].encode('ascii', 'ignore')
entity.poseProbability = data['peopleGenderAccuracy']
if 'peopleGender' in data and data['peopleGender'] is not None:
entity.face.gender = data['peopleGender'].encode('ascii', 'ignore')
entity.face.genderProbability = data['peopleGenderAccuracy']
entity.isOperator = data['peopleIsOperator']
loginfo(entity)
return entity
|
Create a state to get a person by face recognition ID
|
Create a state to get a person by face recognition ID
|
Python
|
bsd-3-clause
|
WalkingMachine/sara_behaviors,WalkingMachine/sara_behaviors
|
Create a state to get a person by face recognition ID
|
#!/usr/bin/env python
# encoding=utf8
import json
import requests
from flexbe_core import EventState
from rospy import logerr, loginfo
from sara_msgs.msg import Entity
"""
Created on 15/05/2018
@author: Lucas Maurice
"""
class WonderlandGetPersonByRecognitionId(EventState):
'''
Find a person by ID.
># id int Recognition name of the object
#> entity sara_msgs/Entity
<= done return when one entity exists
<= none return when no entity exists
<= error return when error reading data
'''
def __init__(self):
# See example_state.py for basic explanations.
super(WonderlandGetPersonByRecognitionId, self).__init__(outcomes=['done', 'none', 'error'],
input_keys=['id'], output_keys=['entity'])
def execute(self, userdata):
# Generate URL to contact
url = "http://wonderland:8000/api/people/?peopleRecognitionId=" + str(userdata.id)
# try the request
try:
response = requests.get(url)
except requests.exceptions.RequestException as e:
logerr(e)
return 'error'
# parse parameter json data
data = json.loads(response.content)
loginfo(data)
if 'peopleId' in data:
userdata.entity = self.generate_entity(data)
return 'done'
else:
return 'none'
@staticmethod
def generate_entity(data):
entity = Entity()
entity.wonderlandId = data['peopleId']
entity.face.id = data['peopleRecognitionId']
if 'peopleColor' in data and data['peopleColor'] is not None:
entity.color = data['peopleColor'].encode('ascii', 'ignore')
if 'peopleName' in data:
entity.aliases.append(data['peopleName'].encode('ascii', 'ignore'))
if 'peoplePose' in data and data['peoplePose'] is not None:
entity.pose = data['peoplePose'].encode('ascii', 'ignore')
entity.poseProbability = data['peopleGenderAccuracy']
if 'peopleGender' in data and data['peopleGender'] is not None:
entity.face.gender = data['peopleGender'].encode('ascii', 'ignore')
entity.face.genderProbability = data['peopleGenderAccuracy']
entity.isOperator = data['peopleIsOperator']
loginfo(entity)
return entity
|
<commit_before><commit_msg>Create a state to get a person by face recognition ID<commit_after>
|
#!/usr/bin/env python
# encoding=utf8
import json
import requests
from flexbe_core import EventState
from rospy import logerr, loginfo
from sara_msgs.msg import Entity
"""
Created on 15/05/2018
@author: Lucas Maurice
"""
class WonderlandGetPersonByRecognitionId(EventState):
'''
Find a person by ID.
># id int Recognition name of the object
#> entity sara_msgs/Entity
<= done return when one entity exists
<= none return when no entity exists
<= error return when error reading data
'''
def __init__(self):
# See example_state.py for basic explanations.
super(WonderlandGetPersonByRecognitionId, self).__init__(outcomes=['done', 'none', 'error'],
input_keys=['id'], output_keys=['entity'])
def execute(self, userdata):
# Generate URL to contact
url = "http://wonderland:8000/api/people/?peopleRecognitionId=" + str(userdata.id)
# try the request
try:
response = requests.get(url)
except requests.exceptions.RequestException as e:
logerr(e)
return 'error'
# parse parameter json data
data = json.loads(response.content)
loginfo(data)
if 'peopleId' in data:
userdata.entity = self.generate_entity(data)
return 'done'
else:
return 'none'
@staticmethod
def generate_entity(data):
entity = Entity()
entity.wonderlandId = data['peopleId']
entity.face.id = data['peopleRecognitionId']
if 'peopleColor' in data and data['peopleColor'] is not None:
entity.color = data['peopleColor'].encode('ascii', 'ignore')
if 'peopleName' in data:
entity.aliases.append(data['peopleName'].encode('ascii', 'ignore'))
if 'peoplePose' in data and data['peoplePose'] is not None:
entity.pose = data['peoplePose'].encode('ascii', 'ignore')
entity.poseProbability = data['peopleGenderAccuracy']
if 'peopleGender' in data and data['peopleGender'] is not None:
entity.face.gender = data['peopleGender'].encode('ascii', 'ignore')
entity.face.genderProbability = data['peopleGenderAccuracy']
entity.isOperator = data['peopleIsOperator']
loginfo(entity)
return entity
|
Create a state to get a person by face recognition ID#!/usr/bin/env python
# encoding=utf8
import json
import requests
from flexbe_core import EventState
from rospy import logerr, loginfo
from sara_msgs.msg import Entity
"""
Created on 15/05/2018
@author: Lucas Maurice
"""
class WonderlandGetPersonByRecognitionId(EventState):
'''
Find a person by ID.
># id int Recognition name of the object
#> entity sara_msgs/Entity
<= done return when one entity exists
<= none return when no entity exists
<= error return when error reading data
'''
def __init__(self):
# See example_state.py for basic explanations.
super(WonderlandGetPersonByRecognitionId, self).__init__(outcomes=['done', 'none', 'error'],
input_keys=['id'], output_keys=['entity'])
def execute(self, userdata):
# Generate URL to contact
url = "http://wonderland:8000/api/people/?peopleRecognitionId=" + str(userdata.id)
# try the request
try:
response = requests.get(url)
except requests.exceptions.RequestException as e:
logerr(e)
return 'error'
# parse parameter json data
data = json.loads(response.content)
loginfo(data)
if 'peopleId' in data:
userdata.entity = self.generate_entity(data)
return 'done'
else:
return 'none'
@staticmethod
def generate_entity(data):
entity = Entity()
entity.wonderlandId = data['peopleId']
entity.face.id = data['peopleRecognitionId']
if 'peopleColor' in data and data['peopleColor'] is not None:
entity.color = data['peopleColor'].encode('ascii', 'ignore')
if 'peopleName' in data:
entity.aliases.append(data['peopleName'].encode('ascii', 'ignore'))
if 'peoplePose' in data and data['peoplePose'] is not None:
entity.pose = data['peoplePose'].encode('ascii', 'ignore')
entity.poseProbability = data['peopleGenderAccuracy']
if 'peopleGender' in data and data['peopleGender'] is not None:
entity.face.gender = data['peopleGender'].encode('ascii', 'ignore')
entity.face.genderProbability = data['peopleGenderAccuracy']
entity.isOperator = data['peopleIsOperator']
loginfo(entity)
return entity
|
<commit_before><commit_msg>Create a state to get a person by face recognition ID<commit_after>#!/usr/bin/env python
# encoding=utf8
import json
import requests
from flexbe_core import EventState
from rospy import logerr, loginfo
from sara_msgs.msg import Entity
"""
Created on 15/05/2018
@author: Lucas Maurice
"""
class WonderlandGetPersonByRecognitionId(EventState):
'''
Find a person by ID.
># id int Recognition name of the object
#> entity sara_msgs/Entity
<= done return when one entity exists
<= none return when no entity exists
<= error return when error reading data
'''
def __init__(self):
# See example_state.py for basic explanations.
super(WonderlandGetPersonByRecognitionId, self).__init__(outcomes=['done', 'none', 'error'],
input_keys=['id'], output_keys=['entity'])
def execute(self, userdata):
# Generate URL to contact
url = "http://wonderland:8000/api/people/?peopleRecognitionId=" + str(userdata.id)
# try the request
try:
response = requests.get(url)
except requests.exceptions.RequestException as e:
logerr(e)
return 'error'
# parse parameter json data
data = json.loads(response.content)
loginfo(data)
if 'peopleId' in data:
userdata.entity = self.generate_entity(data)
return 'done'
else:
return 'none'
@staticmethod
def generate_entity(data):
entity = Entity()
entity.wonderlandId = data['peopleId']
entity.face.id = data['peopleRecognitionId']
if 'peopleColor' in data and data['peopleColor'] is not None:
entity.color = data['peopleColor'].encode('ascii', 'ignore')
if 'peopleName' in data:
entity.aliases.append(data['peopleName'].encode('ascii', 'ignore'))
if 'peoplePose' in data and data['peoplePose'] is not None:
entity.pose = data['peoplePose'].encode('ascii', 'ignore')
entity.poseProbability = data['peopleGenderAccuracy']
if 'peopleGender' in data and data['peopleGender'] is not None:
entity.face.gender = data['peopleGender'].encode('ascii', 'ignore')
entity.face.genderProbability = data['peopleGenderAccuracy']
entity.isOperator = data['peopleIsOperator']
loginfo(entity)
return entity
|
|
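For reference, an illustrative Wonderland payload covering every JSON key that generate_entity() above reads; the values are invented, and actually building the Entity requires sara_msgs/ROS, so the resulting mapping is only described in comments.

# Hypothetical /api/people/ response body consumed by the state above.
sample_person = {
    "peopleId": 12,
    "peopleRecognitionId": 3,
    "peopleColor": "red",
    "peopleName": "Alice",
    "peoplePose": "standing",
    "peopleGender": "female",
    "peopleGenderAccuracy": 0.87,
    "peopleIsOperator": False,
}
# generate_entity(sample_person) maps these onto Entity.wonderlandId, Entity.face.id,
# Entity.color, Entity.aliases, Entity.pose, Entity.face.gender and Entity.isOperator.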
6ff8e6a1662497943584c84462ceda78420d6581
|
inonemonth/challenges/tests/test_github_utils.py
|
inonemonth/challenges/tests/test_github_utils.py
|
import unittest
import django.test
class GithubUtilsTestCase(django.test.TestCase):
def test_get_api_repo_branch_url(self):
self.assertEqual("", "https://api.github.com/repos/RobrechtDR/asiakas/branches/master")
def test_get_repo_and_branch_from_repo_path(self):
self.assertEqual("", ("asiakas", "master"))
|
Write tests for github utils
|
Write tests for github utils
|
Python
|
mit
|
robrechtdr/inonemonth,robrechtdr/inonemonth,robrechtdr/inonemonth,robrechtdr/inonemonth
|
Write tests for github utils
|
import unittest
import django.test
class GithubUtilsTestCase(django.test.TestCase):
def test_get_api_repo_branch_url(self):
self.assertEqual("", "https://api.github.com/repos/RobrechtDR/asiakas/branches/master")
def test_get_repo_and_branch_from_repo_path(self):
self.assertEqual("", ("asiakas", "master"))
|
<commit_before><commit_msg>Write tests for github utils<commit_after>
|
import unittest
import django.test
class GithubUtilsTestCase(django.test.TestCase):
def test_get_api_repo_branch_url(self):
self.assertEqual("", "https://api.github.com/repos/RobrechtDR/asiakas/branches/master")
def test_get_repo_and_branch_from_repo_path(self):
self.assertEqual("", ("asiakas", "master"))
|
Write tests for github utilsimport unittest
import django.test
class GithubUtilsTestCase(django.test.TestCase):
def test_get_api_repo_branch_url(self):
self.assertEqual("", "https://api.github.com/repos/RobrechtDR/asiakas/branches/master")
def test_get_repo_and_branch_from_repo_path(self):
self.assertEqual("", ("asiakas", "master"))
|
<commit_before><commit_msg>Write tests for github utils<commit_after>import unittest
import django.test
class GithubUtilsTestCase(django.test.TestCase):
def test_get_api_repo_branch_url(self):
self.assertEqual("", "https://api.github.com/repos/RobrechtDR/asiakas/branches/master")
def test_get_repo_and_branch_from_repo_path(self):
self.assertEqual("", ("asiakas", "master"))
|
|
d6d2b064df47c6f61755caaaa446f150a5342060
|
bag_graph.py
|
bag_graph.py
|
import bag_engine
'''Generate .dot graph file. Format
digraph G {
1 [label = "room name #1"];
2 [label = "room name #2"];
1 -> 2 [label = "door going from room 1 to room 2"];
}
'''
def create_graph(graph):
room_num = 1
room_name_to_number = {}
for room in graph.keys():
room_name_to_number[room] = room_num
room_num += 1
result = []
for room in graph.keys():
# output the room node
room_number = room_name_to_number[room]
result.append(" " + str(room_number) + ' [label = "' + room + '"];')
for room in graph.keys():
room_number = room_name_to_number[room]
doors = graph[room]["doors"]
for door in doors.keys():
next_room = doors[door]
next_room_number = room_name_to_number[next_room]
result.append(" " + str(room_number) + " -> " + str(next_room_number) + ' [label = "' + door + '"];')
return result
graph = create_graph(bag_engine.g_rooms)
output = open("bag_graph.dot", "w")
output.write("digraph G {" + "\n")
output.writelines("\n".join(graph))
output.writelines("\n}\n")
output.close()
|
Add code to generate .dot file from room data
|
Add code to generate .dot file from room data
|
Python
|
apache-2.0
|
kkrugler/bitney-adventure,kkrugler/bitney-adventure
|
Add code to generate .dot file from room data
|
import bag_engine
'''Generate .dot graph file. Format
digraph G {
1 [label = "room name #1"];
2 [label = "room name #2"];
1 -> 2 [label = "door going from room 1 to room 2"];
}
'''
def create_graph(graph):
room_num = 1
room_name_to_number = {}
for room in graph.keys():
room_name_to_number[room] = room_num
room_num += 1
result = []
for room in graph.keys():
# output the room node
room_number = room_name_to_number[room]
result.append(" " + str(room_number) + ' [label = "' + room + '"];')
for room in graph.keys():
room_number = room_name_to_number[room]
doors = graph[room]["doors"]
for door in doors.keys():
next_room = doors[door]
next_room_number = room_name_to_number[next_room]
result.append(" " + str(room_number) + " -> " + str(next_room_number) + ' [label = "' + door + '"];')
return result
graph = create_graph(bag_engine.g_rooms)
output = open("bag_graph.dot", "w")
output.write("digraph G {" + "\n")
output.writelines("\n".join(graph))
output.writelines("\n}\n")
output.close()
|
<commit_before><commit_msg>Add code to generate .dot file from room data<commit_after>
|
import bag_engine
'''Generate .dot graph file. Format
digraph G {
1 [label = "room name #1"];
2 [label = "room name #2"];
1 -> 2 [label = "door going from room 1 to room 2"];
}
'''
def create_graph(graph):
room_num = 1
room_name_to_number = {}
for room in graph.keys():
room_name_to_number[room] = room_num
room_num += 1
result = []
for room in graph.keys():
# output the room node
room_number = room_name_to_number[room]
result.append(" " + str(room_number) + ' [label = "' + room + '"];')
for room in graph.keys():
room_number = room_name_to_number[room]
doors = graph[room]["doors"]
for door in doors.keys():
next_room = doors[door]
next_room_number = room_name_to_number[next_room]
result.append(" " + str(room_number) + " -> " + str(next_room_number) + ' [label = "' + door + '"];')
return result
graph = create_graph(bag_engine.g_rooms)
output = open("bag_graph.dot", "w")
output.write("digraph G {" + "\n")
output.writelines("\n".join(graph))
output.writelines("\n}\n")
output.close()
|
Add code to generate .dot file from room dataimport bag_engine
'''Generate .dot graph file. Format
digraph G {
1 [label = "room name #1"];
2 [label = "room name #2"];
1 -> 2 [label = "door going from room 1 to room 2"];
}
'''
def create_graph(graph):
room_num = 1
room_name_to_number = {}
for room in graph.keys():
room_name_to_number[room] = room_num
room_num += 1
result = []
for room in graph.keys():
# output the room node
room_number = room_name_to_number[room]
result.append(" " + str(room_number) + ' [label = "' + room + '"];')
for room in graph.keys():
room_number = room_name_to_number[room]
doors = graph[room]["doors"]
for door in doors.keys():
next_room = doors[door]
next_room_number = room_name_to_number[next_room]
result.append(" " + str(room_number) + " -> " + str(next_room_number) + ' [label = "' + door + '"];')
return result
graph = create_graph(bag_engine.g_rooms)
output = open("bag_graph.dot", "w")
output.write("digraph G {" + "\n")
output.writelines("\n".join(graph))
output.writelines("\n}\n")
output.close()
|
<commit_before><commit_msg>Add code to generate .dot file from room data<commit_after>import bag_engine
'''Generate .dot graph file. Format
digraph G {
1 [label = "room name #1"];
2 [label = "room name #2"];
1 -> 2 [label = "door going from room 1 to room 2"];
}
'''
def create_graph(graph):
room_num = 1
room_name_to_number = {}
for room in graph.keys():
room_name_to_number[room] = room_num
room_num += 1
result = []
for room in graph.keys():
# output the room node
room_number = room_name_to_number[room]
result.append(" " + str(room_number) + ' [label = "' + room + '"];')
for room in graph.keys():
room_number = room_name_to_number[room]
doors = graph[room]["doors"]
for door in doors.keys():
next_room = doors[door]
next_room_number = room_name_to_number[next_room]
result.append(" " + str(room_number) + " -> " + str(next_room_number) + ' [label = "' + door + '"];')
return result
graph = create_graph(bag_engine.g_rooms)
output = open("bag_graph.dot", "w")
output.write("digraph G {" + "\n")
output.writelines("\n".join(graph))
output.writelines("\n}\n")
output.close()
|
|
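Once bag_graph.dot has been written by the script above, it can be rendered to an image with Graphviz; the snippet assumes the dot binary is installed and on PATH, and the output filename is arbitrary.

# Render the generated bag_graph.dot to a PNG with Graphviz.
import subprocess
subprocess.run(["dot", "-Tpng", "bag_graph.dot", "-o", "bag_graph.png"], check=True)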
5d0bed8673306456e517ce042d07bfe627a25758
|
analytics/management/commands/clear_analytics_tables.py
|
analytics/management/commands/clear_analytics_tables.py
|
from __future__ import absolute_import
from __future__ import print_function
import sys
from argparse import ArgumentParser
from django.db import connection
from django.core.management.base import BaseCommand
from typing import Any
CLEAR_QUERY = """
DELETE FROM ONLY analytics_installationcount;
DELETE FROM ONLY analytics_realmcount;
DELETE FROM ONLY analytics_usercount;
DELETE FROM ONLY analytics_streamcount;
DELETE FROM ONLY analytics_huddlecount
"""
class Command(BaseCommand):
help = """Clear Analytics tables."""
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
parser.add_argument('--force',
action='store_true',
help="Clear analytics Tables.")
def handle(self, *args, **options):
# type: (*Any, **Any) -> None
if options['force']:
cursor = connection.cursor()
cursor.execute(CLEAR_QUERY)
cursor.close()
else:
print("Would delete all data from analytics tables (!); use --force to do so.")
sys.exit(1)
|
Add script to clear analytics tables.
|
Add script to clear analytics tables.
|
Python
|
apache-2.0
|
calvinleenyc/zulip,synicalsyntax/zulip,andersk/zulip,Diptanshu8/zulip,arpith/zulip,Juanvulcano/zulip,christi3k/zulip,dhcrzf/zulip,amyliu345/zulip,jainayush975/zulip,krtkmj/zulip,brainwane/zulip,verma-varsha/zulip,samatdav/zulip,hackerkid/zulip,rht/zulip,mohsenSy/zulip,jainayush975/zulip,sharmaeklavya2/zulip,cosmicAsymmetry/zulip,vaidap/zulip,TigorC/zulip,rishig/zulip,brainwane/zulip,PhilSk/zulip,amyliu345/zulip,blaze225/zulip,JPJPJPOPOP/zulip,Jianchun1/zulip,souravbadami/zulip,christi3k/zulip,jainayush975/zulip,joyhchen/zulip,peguin40/zulip,zacps/zulip,amanharitsh123/zulip,samatdav/zulip,joyhchen/zulip,j831/zulip,dawran6/zulip,dhcrzf/zulip,jrowan/zulip,brockwhittaker/zulip,susansls/zulip,brockwhittaker/zulip,grave-w-grave/zulip,jackrzhang/zulip,tommyip/zulip,vikas-parashar/zulip,kou/zulip,blaze225/zulip,niftynei/zulip,SmartPeople/zulip,Galexrt/zulip,dawran6/zulip,verma-varsha/zulip,brainwane/zulip,amyliu345/zulip,tommyip/zulip,hackerkid/zulip,punchagan/zulip,isht3/zulip,verma-varsha/zulip,isht3/zulip,sonali0901/zulip,susansls/zulip,dawran6/zulip,Jianchun1/zulip,vikas-parashar/zulip,grave-w-grave/zulip,TigorC/zulip,sharmaeklavya2/zulip,dhcrzf/zulip,vaidap/zulip,sonali0901/zulip,Galexrt/zulip,jackrzhang/zulip,jrowan/zulip,zacps/zulip,hackerkid/zulip,andersk/zulip,joyhchen/zulip,timabbott/zulip,arpith/zulip,brainwane/zulip,jainayush975/zulip,reyha/zulip,Diptanshu8/zulip,christi3k/zulip,vabs22/zulip,cosmicAsymmetry/zulip,kou/zulip,vaidap/zulip,mahim97/zulip,zulip/zulip,SmartPeople/zulip,KingxBanana/zulip,dattatreya303/zulip,joyhchen/zulip,verma-varsha/zulip,Juanvulcano/zulip,sup95/zulip,brockwhittaker/zulip,eeshangarg/zulip,rht/zulip,rishig/zulip,SmartPeople/zulip,mohsenSy/zulip,Juanvulcano/zulip,niftynei/zulip,krtkmj/zulip,jphilipsen05/zulip,timabbott/zulip,isht3/zulip,grave-w-grave/zulip,susansls/zulip,timabbott/zulip,shubhamdhama/zulip,synicalsyntax/zulip,mahim97/zulip,showell/zulip,zulip/zulip,eeshangarg/zulip,isht3/zulip,niftynei/zulip,rishig/zulip,vabs22/zulip,tommyip/zulip,reyha/zulip,jrowan/zulip,punchagan/zulip,dattatreya303/zulip,hackerkid/zulip,AZtheAsian/zulip,sharmaeklavya2/zulip,jphilipsen05/zulip,showell/zulip,kou/zulip,Galexrt/zulip,sonali0901/zulip,krtkmj/zulip,vabs22/zulip,dhcrzf/zulip,eeshangarg/zulip,blaze225/zulip,jphilipsen05/zulip,aakash-cr7/zulip,cosmicAsymmetry/zulip,TigorC/zulip,vaidap/zulip,TigorC/zulip,KingxBanana/zulip,mohsenSy/zulip,Juanvulcano/zulip,Galexrt/zulip,Diptanshu8/zulip,brainwane/zulip,cosmicAsymmetry/zulip,kou/zulip,Galexrt/zulip,shubhamdhama/zulip,zulip/zulip,souravbadami/zulip,dawran6/zulip,JPJPJPOPOP/zulip,shubhamdhama/zulip,zulip/zulip,dattatreya303/zulip,TigorC/zulip,eeshangarg/zulip,timabbott/zulip,andersk/zulip,rht/zulip,cosmicAsymmetry/zulip,samatdav/zulip,JPJPJPOPOP/zulip,punchagan/zulip,amyliu345/zulip,jrowan/zulip,JPJPJPOPOP/zulip,synicalsyntax/zulip,samatdav/zulip,umkay/zulip,Diptanshu8/zulip,mohsenSy/zulip,AZtheAsian/zulip,Diptanshu8/zulip,j831/zulip,mohsenSy/zulip,KingxBanana/zulip,brockwhittaker/zulip,vabs22/zulip,grave-w-grave/zulip,tommyip/zulip,reyha/zulip,JPJPJPOPOP/zulip,jainayush975/zulip,dhcrzf/zulip,PhilSk/zulip,PhilSk/zulip,niftynei/zulip,mahim97/zulip,rishig/zulip,ryanbackman/zulip,kou/zulip,JPJPJPOPOP/zulip,sharmaeklavya2/zulip,KingxBanana/zulip,SmartPeople/zulip,blaze225/zulip,j831/zulip,paxapy/zulip,arpith/zulip,calvinleenyc/zulip,showell/zulip,punchagan/zulip,Juanvulcano/zulip,andersk/zulip,andersk/zulip,timabbott/zulip,jackrzhang/zulip,paxapy/zulip,mohsenSy/zulip,krtkmj/zulip,jackrzhang/zulip,timabbott/zulip,sonali0901/zuli
p,peguin40/zulip,showell/zulip,arpith/zulip,amyliu345/zulip,eeshangarg/zulip,peguin40/zulip,aakash-cr7/zulip,mahim97/zulip,vaidap/zulip,rht/zulip,paxapy/zulip,cosmicAsymmetry/zulip,zacps/zulip,dattatreya303/zulip,brainwane/zulip,AZtheAsian/zulip,rht/zulip,Jianchun1/zulip,reyha/zulip,amanharitsh123/zulip,souravbadami/zulip,umkay/zulip,rht/zulip,zulip/zulip,j831/zulip,AZtheAsian/zulip,amanharitsh123/zulip,brockwhittaker/zulip,zacps/zulip,vabs22/zulip,TigorC/zulip,PhilSk/zulip,arpith/zulip,grave-w-grave/zulip,andersk/zulip,ryanbackman/zulip,shubhamdhama/zulip,Diptanshu8/zulip,jackrzhang/zulip,zacps/zulip,showell/zulip,sharmaeklavya2/zulip,punchagan/zulip,zulip/zulip,kou/zulip,niftynei/zulip,SmartPeople/zulip,christi3k/zulip,verma-varsha/zulip,sup95/zulip,mahim97/zulip,jackrzhang/zulip,SmartPeople/zulip,ryanbackman/zulip,umkay/zulip,rht/zulip,AZtheAsian/zulip,eeshangarg/zulip,tommyip/zulip,paxapy/zulip,jrowan/zulip,jphilipsen05/zulip,Jianchun1/zulip,shubhamdhama/zulip,souravbadami/zulip,sonali0901/zulip,Jianchun1/zulip,ryanbackman/zulip,showell/zulip,andersk/zulip,synicalsyntax/zulip,AZtheAsian/zulip,Galexrt/zulip,paxapy/zulip,sup95/zulip,vaidap/zulip,synicalsyntax/zulip,sonali0901/zulip,blaze225/zulip,krtkmj/zulip,sharmaeklavya2/zulip,dawran6/zulip,samatdav/zulip,vikas-parashar/zulip,amanharitsh123/zulip,christi3k/zulip,jackrzhang/zulip,umkay/zulip,samatdav/zulip,j831/zulip,zulip/zulip,vikas-parashar/zulip,dhcrzf/zulip,punchagan/zulip,hackerkid/zulip,krtkmj/zulip,dawran6/zulip,brockwhittaker/zulip,timabbott/zulip,vikas-parashar/zulip,susansls/zulip,aakash-cr7/zulip,sup95/zulip,PhilSk/zulip,blaze225/zulip,hackerkid/zulip,PhilSk/zulip,dattatreya303/zulip,peguin40/zulip,souravbadami/zulip,souravbadami/zulip,krtkmj/zulip,aakash-cr7/zulip,jainayush975/zulip,tommyip/zulip,rishig/zulip,christi3k/zulip,Jianchun1/zulip,j831/zulip,vabs22/zulip,zacps/zulip,calvinleenyc/zulip,verma-varsha/zulip,sup95/zulip,punchagan/zulip,tommyip/zulip,amanharitsh123/zulip,Galexrt/zulip,grave-w-grave/zulip,eeshangarg/zulip,calvinleenyc/zulip,peguin40/zulip,paxapy/zulip,synicalsyntax/zulip,umkay/zulip,jphilipsen05/zulip,showell/zulip,aakash-cr7/zulip,niftynei/zulip,hackerkid/zulip,shubhamdhama/zulip,isht3/zulip,aakash-cr7/zulip,arpith/zulip,kou/zulip,calvinleenyc/zulip,KingxBanana/zulip,vikas-parashar/zulip,KingxBanana/zulip,reyha/zulip,susansls/zulip,ryanbackman/zulip,peguin40/zulip,dhcrzf/zulip,calvinleenyc/zulip,rishig/zulip,jphilipsen05/zulip,joyhchen/zulip,brainwane/zulip,susansls/zulip,shubhamdhama/zulip,jrowan/zulip,Juanvulcano/zulip,ryanbackman/zulip,isht3/zulip,mahim97/zulip,umkay/zulip,joyhchen/zulip,rishig/zulip,reyha/zulip,dattatreya303/zulip,synicalsyntax/zulip,sup95/zulip,umkay/zulip,amanharitsh123/zulip,amyliu345/zulip
|
Add script to clear analytics tables.
|
from __future__ import absolute_import
from __future__ import print_function
import sys
from argparse import ArgumentParser
from django.db import connection
from django.core.management.base import BaseCommand
from typing import Any
CLEAR_QUERY = """
DELETE FROM ONLY analytics_installationcount;
DELETE FROM ONLY analytics_realmcount;
DELETE FROM ONLY analytics_usercount;
DELETE FROM ONLY analytics_streamcount;
DELETE FROM ONLY analytics_huddlecount
"""
class Command(BaseCommand):
help = """Clear Analytics tables."""
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
parser.add_argument('--force',
action='store_true',
help="Clear analytics Tables.")
def handle(self, *args, **options):
# type: (*Any, **Any) -> None
if options['force']:
cursor = connection.cursor()
cursor.execute(CLEAR_QUERY)
cursor.close()
else:
print("Would delete all data from analytics tables (!); use --force to do so.")
sys.exit(1)
|
<commit_before><commit_msg>Add script to clear analytics tables.<commit_after>
|
from __future__ import absolute_import
from __future__ import print_function
import sys
from argparse import ArgumentParser
from django.db import connection
from django.core.management.base import BaseCommand
from typing import Any
CLEAR_QUERY = """
DELETE FROM ONLY analytics_installationcount;
DELETE FROM ONLY analytics_realmcount;
DELETE FROM ONLY analytics_usercount;
DELETE FROM ONLY analytics_streamcount;
DELETE FROM ONLY analytics_huddlecount
"""
class Command(BaseCommand):
help = """Clear Analytics tables."""
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
parser.add_argument('--force',
action='store_true',
help="Clear analytics Tables.")
def handle(self, *args, **options):
# type: (*Any, **Any) -> None
if options['force']:
cursor = connection.cursor()
cursor.execute(CLEAR_QUERY)
cursor.close()
else:
print("Would delete all data from analytics tables (!); use --force to do so.")
sys.exit(1)
|
Add script to clear analytics tables.from __future__ import absolute_import
from __future__ import print_function
import sys
from argparse import ArgumentParser
from django.db import connection
from django.core.management.base import BaseCommand
from typing import Any
CLEAR_QUERY = """
DELETE FROM ONLY analytics_installationcount;
DELETE FROM ONLY analytics_realmcount;
DELETE FROM ONLY analytics_usercount;
DELETE FROM ONLY analytics_streamcount;
DELETE FROM ONLY analytics_huddlecount
"""
class Command(BaseCommand):
help = """Clear Analytics tables."""
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
parser.add_argument('--force',
action='store_true',
help="Clear analytics Tables.")
def handle(self, *args, **options):
# type: (*Any, **Any) -> None
if options['force']:
cursor = connection.cursor()
cursor.execute(CLEAR_QUERY)
cursor.close()
else:
print("Would delete all data from analytics tables (!); use --force to do so.")
sys.exit(1)
|
<commit_before><commit_msg>Add script to clear analytics tables.<commit_after>from __future__ import absolute_import
from __future__ import print_function
import sys
from argparse import ArgumentParser
from django.db import connection
from django.core.management.base import BaseCommand
from typing import Any
CLEAR_QUERY = """
DELETE FROM ONLY analytics_installationcount;
DELETE FROM ONLY analytics_realmcount;
DELETE FROM ONLY analytics_usercount;
DELETE FROM ONLY analytics_streamcount;
DELETE FROM ONLY analytics_huddlecount
"""
class Command(BaseCommand):
help = """Clear Analytics tables."""
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
parser.add_argument('--force',
action='store_true',
help="Clear analytics Tables.")
def handle(self, *args, **options):
# type: (*Any, **Any) -> None
if options['force']:
cursor = connection.cursor()
cursor.execute(CLEAR_QUERY)
cursor.close()
else:
print("Would delete all data from analytics tables (!); use --force to do so.")
sys.exit(1)
|
|
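As a usage illustration for the management command in the record above: the record does not show the module path, so the command name below ("clear_analytics") is an assumption; with Django, the actual name would come from the file's location under an app's management/commands/ package.

    # Hypothetical invocation sketch; "clear_analytics" is an assumed command name.
    from django.core.management import call_command

    call_command("clear_analytics")              # prints a notice and raises SystemExit(1)
    call_command("clear_analytics", force=True)  # executes CLEAR_QUERY against the default connection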
2ec55af2c3d97140fe0db64a237c6d927beccaa9
|
statsmodels/tsa/statespace/tests/test_prediction.py
|
statsmodels/tsa/statespace/tests/test_prediction.py
|
"""
Tests for prediction of state space models
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import #, print_function
import numpy as np
import pandas as pd
import warnings
from statsmodels.tsa.statespace import sarimax
from numpy.testing import assert_equal, assert_allclose, assert_raises
from nose.exc import SkipTest
def test_predict_dates():
index = pd.date_range(start='1950-01-01', periods=11, freq='D')
np.random.seed(324328)
endog = pd.Series(np.random.normal(size=10), index=index[:-1])
# Basic test
mod = sarimax.SARIMAX(endog, order=(1, 0, 0))
res = mod.filter(mod.start_params)
# In-sample prediction should have the same index
pred = res.predict()
assert_equal(len(pred), mod.nobs)
assert_equal(pred.index.values, index[:-1].values)
# Out-of-sample forecasting should extend the index appropriately
fcast = res.forecast()
assert_equal(fcast.index[0], index[-1])
# Simple differencing in the SARIMAX model should eliminate dates of
# series eliminated due to differencing
mod = sarimax.SARIMAX(endog, order=(1, 1, 0), simple_differencing=True)
res = mod.filter(mod.start_params)
pred = res.predict()
# In-sample prediction should lose the first index value
assert_equal(mod.nobs, endog.shape[0] - 1)
assert_equal(len(pred), mod.nobs)
assert_equal(pred.index.values, index[1:-1].values)
# Out-of-sample forecasting should still extend the index appropriately
fcast = res.forecast()
assert_equal(fcast.index[0], index[-1])
|
Add (failing) test for predict dates.
|
TST: Add (failing) test for predict dates.
|
Python
|
bsd-3-clause
|
bashtage/statsmodels,josef-pkt/statsmodels,josef-pkt/statsmodels,ChadFulton/statsmodels,phobson/statsmodels,yl565/statsmodels,yl565/statsmodels,bashtage/statsmodels,bashtage/statsmodels,ChadFulton/statsmodels,yl565/statsmodels,ChadFulton/statsmodels,phobson/statsmodels,phobson/statsmodels,statsmodels/statsmodels,josef-pkt/statsmodels,bashtage/statsmodels,jseabold/statsmodels,statsmodels/statsmodels,bashtage/statsmodels,phobson/statsmodels,josef-pkt/statsmodels,jseabold/statsmodels,statsmodels/statsmodels,ChadFulton/statsmodels,bert9bert/statsmodels,josef-pkt/statsmodels,jseabold/statsmodels,ChadFulton/statsmodels,yl565/statsmodels,bashtage/statsmodels,bert9bert/statsmodels,bert9bert/statsmodels,jseabold/statsmodels,yl565/statsmodels,statsmodels/statsmodels,bert9bert/statsmodels,ChadFulton/statsmodels,bert9bert/statsmodels,josef-pkt/statsmodels,phobson/statsmodels,statsmodels/statsmodels,statsmodels/statsmodels,jseabold/statsmodels
|
TST: Add (failing) test for predict dates.
|
"""
Tests for prediction of state space models
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import #, print_function
import numpy as np
import pandas as pd
import warnings
from statsmodels.tsa.statespace import sarimax
from numpy.testing import assert_equal, assert_allclose, assert_raises
from nose.exc import SkipTest
def test_predict_dates():
index = pd.date_range(start='1950-01-01', periods=11, freq='D')
np.random.seed(324328)
endog = pd.Series(np.random.normal(size=10), index=index[:-1])
# Basic test
mod = sarimax.SARIMAX(endog, order=(1, 0, 0))
res = mod.filter(mod.start_params)
# In-sample prediction should have the same index
pred = res.predict()
assert_equal(len(pred), mod.nobs)
assert_equal(pred.index.values, index[:-1].values)
# Out-of-sample forecasting should extend the index appropriately
fcast = res.forecast()
assert_equal(fcast.index[0], index[-1])
# Simple differencing in the SARIMAX model should eliminate dates of
# series eliminated due to differencing
mod = sarimax.SARIMAX(endog, order=(1, 1, 0), simple_differencing=True)
res = mod.filter(mod.start_params)
pred = res.predict()
# In-sample prediction should lose the first index value
assert_equal(mod.nobs, endog.shape[0] - 1)
assert_equal(len(pred), mod.nobs)
assert_equal(pred.index.values, index[1:-1].values)
# Out-of-sample forecasting should still extend the index appropriately
fcast = res.forecast()
assert_equal(fcast.index[0], index[-1])
|
<commit_before><commit_msg>TST: Add (failing) test for predict dates.<commit_after>
|
"""
Tests for prediction of state space models
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import #, print_function
import numpy as np
import pandas as pd
import warnings
from statsmodels.tsa.statespace import sarimax
from numpy.testing import assert_equal, assert_allclose, assert_raises
from nose.exc import SkipTest
def test_predict_dates():
index = pd.date_range(start='1950-01-01', periods=11, freq='D')
np.random.seed(324328)
endog = pd.Series(np.random.normal(size=10), index=index[:-1])
# Basic test
mod = sarimax.SARIMAX(endog, order=(1, 0, 0))
res = mod.filter(mod.start_params)
# In-sample prediction should have the same index
pred = res.predict()
assert_equal(len(pred), mod.nobs)
assert_equal(pred.index.values, index[:-1].values)
# Out-of-sample forecasting should extend the index appropriately
fcast = res.forecast()
assert_equal(fcast.index[0], index[-1])
# Simple differencing in the SARIMAX model should eliminate dates of
# series eliminated due to differencing
mod = sarimax.SARIMAX(endog, order=(1, 1, 0), simple_differencing=True)
res = mod.filter(mod.start_params)
pred = res.predict()
# In-sample prediction should lose the first index value
assert_equal(mod.nobs, endog.shape[0] - 1)
assert_equal(len(pred), mod.nobs)
assert_equal(pred.index.values, index[1:-1].values)
# Out-of-sample forecasting should still extend the index appropriately
fcast = res.forecast()
assert_equal(fcast.index[0], index[-1])
|
TST: Add (failing) test for predict dates."""
Tests for prediction of state space models
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import #, print_function
import numpy as np
import pandas as pd
import warnings
from statsmodels.tsa.statespace import sarimax
from numpy.testing import assert_equal, assert_allclose, assert_raises
from nose.exc import SkipTest
def test_predict_dates():
index = pd.date_range(start='1950-01-01', periods=11, freq='D')
np.random.seed(324328)
endog = pd.Series(np.random.normal(size=10), index=index[:-1])
# Basic test
mod = sarimax.SARIMAX(endog, order=(1, 0, 0))
res = mod.filter(mod.start_params)
# In-sample prediction should have the same index
pred = res.predict()
assert_equal(len(pred), mod.nobs)
assert_equal(pred.index.values, index[:-1].values)
# Out-of-sample forecasting should extend the index appropriately
fcast = res.forecast()
assert_equal(fcast.index[0], index[-1])
# Simple differencing in the SARIMAX model should eliminate dates of
# series eliminated due to differencing
mod = sarimax.SARIMAX(endog, order=(1, 1, 0), simple_differencing=True)
res = mod.filter(mod.start_params)
pred = res.predict()
# In-sample prediction should lose the first index value
assert_equal(mod.nobs, endog.shape[0] - 1)
assert_equal(len(pred), mod.nobs)
assert_equal(pred.index.values, index[1:-1].values)
# Out-of-sample forecasting should still extend the index appropriately
fcast = res.forecast()
assert_equal(fcast.index[0], index[-1])
|
<commit_before><commit_msg>TST: Add (failing) test for predict dates.<commit_after>"""
Tests for prediction of state space models
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import #, print_function
import numpy as np
import pandas as pd
import warnings
from statsmodels.tsa.statespace import sarimax
from numpy.testing import assert_equal, assert_allclose, assert_raises
from nose.exc import SkipTest
def test_predict_dates():
index = pd.date_range(start='1950-01-01', periods=11, freq='D')
np.random.seed(324328)
endog = pd.Series(np.random.normal(size=10), index=index[:-1])
# Basic test
mod = sarimax.SARIMAX(endog, order=(1, 0, 0))
res = mod.filter(mod.start_params)
# In-sample prediction should have the same index
pred = res.predict()
assert_equal(len(pred), mod.nobs)
assert_equal(pred.index.values, index[:-1].values)
# Out-of-sample forecasting should extend the index appropriately
fcast = res.forecast()
assert_equal(fcast.index[0], index[-1])
# Simple differencing in the SARIMAX model should eliminate dates of
# series eliminated due to differencing
mod = sarimax.SARIMAX(endog, order=(1, 1, 0), simple_differencing=True)
res = mod.filter(mod.start_params)
pred = res.predict()
# In-sample prediction should lose the first index value
assert_equal(mod.nobs, endog.shape[0] - 1)
assert_equal(len(pred), mod.nobs)
assert_equal(pred.index.values, index[1:-1].values)
# Out-of-sample forecasting should still extend the index appropriately
fcast = res.forecast()
assert_equal(fcast.index[0], index[-1])
|
|
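For context on what the assertions in the record above pin down: a forecast one step past the sample should be labelled with the next date in the series' frequency. A small stand-alone pandas illustration, independent of statsmodels:

    import pandas as pd

    index = pd.date_range(start='1950-01-01', periods=10, freq='D')
    next_date = index[-1] + index.freq   # the label a one-step-ahead forecast should carry
    print(next_date)                     # 1950-01-11 00:00:00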
4059a0073ef676fba4df47f69b469baf4cef15b1
|
tests/services/authorization/test_role_to_user_assignment.py
|
tests/services/authorization/test_role_to_user_assignment.py
|
"""
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from byceps.services.authorization import service
from testfixtures.user import create_user
from tests.base import AbstractAppTestCase
class RoleToUserAssignmentTestCase(AbstractAppTestCase):
def setUp(self):
super().setUp()
self.permission_id = 'board_topic_hide'
self.role = self.create_role_with_permission('board_moderator',
self.permission_id)
self.user = self.create_user()
def test_assign_role_to_user(self):
role_id = self.role.id
user_id = self.user.id
user_permission_ids_before = service.get_permission_ids_for_user(user_id)
self.assertNotIn(self.permission_id, user_permission_ids_before)
service.assign_role_to_user(user_id, role_id)
user_permission_ids_after = service.get_permission_ids_for_user(user_id)
self.assertIn(self.permission_id, user_permission_ids_after)
def test_deassign_role_from_user(self):
role_id = self.role.id
user_id = self.user.id
service.assign_role_to_user(user_id, role_id)
user_permission_ids_before = service.get_permission_ids_for_user(user_id)
self.assertIn(self.permission_id, user_permission_ids_before)
service.deassign_role_from_user(user_id, role_id)
user_permission_ids_after = service.get_permission_ids_for_user(user_id)
self.assertNotIn(self.permission_id, user_permission_ids_after)
# -------------------------------------------------------------------- #
# helpers
def create_user(self):
user = create_user(4, screen_name='Alice')
self.db.session.add(user)
self.db.session.commit()
return user
def create_role_with_permission(self, role_id, permission_id):
role = service.create_role(role_id, role_id)
permission = service.create_permission(permission_id, permission_id)
service.assign_permission_to_role(permission, role)
return role
|
Test (de)assignment of roles to users
|
Test (de)assignment of roles to users
|
Python
|
bsd-3-clause
|
homeworkprod/byceps,m-ober/byceps,m-ober/byceps,m-ober/byceps,homeworkprod/byceps,homeworkprod/byceps
|
Test (de)assignment of roles to users
|
"""
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from byceps.services.authorization import service
from testfixtures.user import create_user
from tests.base import AbstractAppTestCase
class RoleToUserAssignmentTestCase(AbstractAppTestCase):
def setUp(self):
super().setUp()
self.permission_id = 'board_topic_hide'
self.role = self.create_role_with_permission('board_moderator',
self.permission_id)
self.user = self.create_user()
def test_assign_role_to_user(self):
role_id = self.role.id
user_id = self.user.id
user_permission_ids_before = service.get_permission_ids_for_user(user_id)
self.assertNotIn(self.permission_id, user_permission_ids_before)
service.assign_role_to_user(user_id, role_id)
user_permission_ids_after = service.get_permission_ids_for_user(user_id)
self.assertIn(self.permission_id, user_permission_ids_after)
def test_deassign_role_from_user(self):
role_id = self.role.id
user_id = self.user.id
service.assign_role_to_user(user_id, role_id)
user_permission_ids_before = service.get_permission_ids_for_user(user_id)
self.assertIn(self.permission_id, user_permission_ids_before)
service.deassign_role_from_user(user_id, role_id)
user_permission_ids_after = service.get_permission_ids_for_user(user_id)
self.assertNotIn(self.permission_id, user_permission_ids_after)
# -------------------------------------------------------------------- #
# helpers
def create_user(self):
user = create_user(4, screen_name='Alice')
self.db.session.add(user)
self.db.session.commit()
return user
def create_role_with_permission(self, role_id, permission_id):
role = service.create_role(role_id, role_id)
permission = service.create_permission(permission_id, permission_id)
service.assign_permission_to_role(permission, role)
return role
|
<commit_before><commit_msg>Test (de)assignment of roles to users<commit_after>
|
"""
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from byceps.services.authorization import service
from testfixtures.user import create_user
from tests.base import AbstractAppTestCase
class RoleToUserAssignmentTestCase(AbstractAppTestCase):
def setUp(self):
super().setUp()
self.permission_id = 'board_topic_hide'
self.role = self.create_role_with_permission('board_moderator',
self.permission_id)
self.user = self.create_user()
def test_assign_role_to_user(self):
role_id = self.role.id
user_id = self.user.id
user_permission_ids_before = service.get_permission_ids_for_user(user_id)
self.assertNotIn(self.permission_id, user_permission_ids_before)
service.assign_role_to_user(user_id, role_id)
user_permission_ids_after = service.get_permission_ids_for_user(user_id)
self.assertIn(self.permission_id, user_permission_ids_after)
def test_deassign_role_from_user(self):
role_id = self.role.id
user_id = self.user.id
service.assign_role_to_user(user_id, role_id)
user_permission_ids_before = service.get_permission_ids_for_user(user_id)
self.assertIn(self.permission_id, user_permission_ids_before)
service.deassign_role_from_user(user_id, role_id)
user_permission_ids_after = service.get_permission_ids_for_user(user_id)
self.assertNotIn(self.permission_id, user_permission_ids_after)
# -------------------------------------------------------------------- #
# helpers
def create_user(self):
user = create_user(4, screen_name='Alice')
self.db.session.add(user)
self.db.session.commit()
return user
def create_role_with_permission(self, role_id, permission_id):
role = service.create_role(role_id, role_id)
permission = service.create_permission(permission_id, permission_id)
service.assign_permission_to_role(permission, role)
return role
|
Test (de)assignment of roles to users"""
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from byceps.services.authorization import service
from testfixtures.user import create_user
from tests.base import AbstractAppTestCase
class RoleToUserAssignmentTestCase(AbstractAppTestCase):
def setUp(self):
super().setUp()
self.permission_id = 'board_topic_hide'
self.role = self.create_role_with_permission('board_moderator',
self.permission_id)
self.user = self.create_user()
def test_assign_role_to_user(self):
role_id = self.role.id
user_id = self.user.id
user_permission_ids_before = service.get_permission_ids_for_user(user_id)
self.assertNotIn(self.permission_id, user_permission_ids_before)
service.assign_role_to_user(user_id, role_id)
user_permission_ids_after = service.get_permission_ids_for_user(user_id)
self.assertIn(self.permission_id, user_permission_ids_after)
def test_deassign_role_from_user(self):
role_id = self.role.id
user_id = self.user.id
service.assign_role_to_user(user_id, role_id)
user_permission_ids_before = service.get_permission_ids_for_user(user_id)
self.assertIn(self.permission_id, user_permission_ids_before)
service.deassign_role_from_user(user_id, role_id)
user_permission_ids_after = service.get_permission_ids_for_user(user_id)
self.assertNotIn(self.permission_id, user_permission_ids_after)
# -------------------------------------------------------------------- #
# helpers
def create_user(self):
user = create_user(4, screen_name='Alice')
self.db.session.add(user)
self.db.session.commit()
return user
def create_role_with_permission(self, role_id, permission_id):
role = service.create_role(role_id, role_id)
permission = service.create_permission(permission_id, permission_id)
service.assign_permission_to_role(permission, role)
return role
|
<commit_before><commit_msg>Test (de)assignment of roles to users<commit_after>"""
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from byceps.services.authorization import service
from testfixtures.user import create_user
from tests.base import AbstractAppTestCase
class RoleToUserAssignmentTestCase(AbstractAppTestCase):
def setUp(self):
super().setUp()
self.permission_id = 'board_topic_hide'
self.role = self.create_role_with_permission('board_moderator',
self.permission_id)
self.user = self.create_user()
def test_assign_role_to_user(self):
role_id = self.role.id
user_id = self.user.id
user_permission_ids_before = service.get_permission_ids_for_user(user_id)
self.assertNotIn(self.permission_id, user_permission_ids_before)
service.assign_role_to_user(user_id, role_id)
user_permission_ids_after = service.get_permission_ids_for_user(user_id)
self.assertIn(self.permission_id, user_permission_ids_after)
def test_deassign_role_from_user(self):
role_id = self.role.id
user_id = self.user.id
service.assign_role_to_user(user_id, role_id)
user_permission_ids_before = service.get_permission_ids_for_user(user_id)
self.assertIn(self.permission_id, user_permission_ids_before)
service.deassign_role_from_user(user_id, role_id)
user_permission_ids_after = service.get_permission_ids_for_user(user_id)
self.assertNotIn(self.permission_id, user_permission_ids_after)
# -------------------------------------------------------------------- #
# helpers
def create_user(self):
user = create_user(4, screen_name='Alice')
self.db.session.add(user)
self.db.session.commit()
return user
def create_role_with_permission(self, role_id, permission_id):
role = service.create_role(role_id, role_id)
permission = service.create_permission(permission_id, permission_id)
service.assign_permission_to_role(permission, role)
return role
|
|
430ac9869481fa4be378a13caeb6ca642c5f22f7
|
src/utilities.py
|
src/utilities.py
|
from __future__ import division
import numpy as np
class DiffKernel(object):
"""
A fake kernel that can be used to predict differences between two function values.
Given a gp based on measurements, we aim to predict the difference between
the function values at two different test points, X1 and X2; that is, we
want to obtain mean and variance of f(X1) - f(X2). Using this fake
kernel, this can be achieved with
`mean, var = gp.predict(np.vstack((X1, X2)), kern=DiffKernel(gp.kern))`
Parameters
----------
kernel: GPy.kern.*
The kernel used by the GP
"""
def __init__(self, kernel):
self.kern = kernel
def K(self, x1, x2=None):
"""Equivalent of kern.K
If only x1 is passed then it is assumed to contain the data for both points
whose differences we are computing. Otherwise, x2 will contain these
extended states (see PosteriorExact._raw_predict in
GPy/inference/latent_function_inference0/posterior.py)
Parameters
----------
x1: np.array
x2: np.array
"""
dim = self.kern.input_dim
if x2 is None:
x10 = x1[:, :dim]
x11 = x1[:, dim:]
return (self.kern.K(x10) + self.kern.K(x11) -
2 * self.kern.K(x10, x11))
else:
x20 = x2[:, :dim]
x21 = x2[:, dim:]
return self.kern.K(x1, x20) - self.kern.K(x1, x21)
def Kdiag(self, x):
"""Equivalent of kern.Kdiag for the difference prediction.
Parameters
----------
x: np.array
"""
dim = self.kern.input_dim
x0 = x[:, :dim]
x1 = x[:, dim:]
return (self.kern.Kdiag(x0) + self.kern.Kdiag(x1) -
2 * np.diag(self.kern.K(x0, x1)))
|
Add fake kernel to predict function differences using GPy
|
Add fake kernel to predict function differences using GPy
|
Python
|
mit
|
befelix/SafeMDP,befelix/SafeMDP
|
Add fake kernel to predict function differences using GPy
|
from __future__ import division
import numpy as np
class DiffKernel(object):
"""
A fake kernel that can be used to predict differences between two function values.
Given a gp based on measurements, we aim to predict the difference between
the function values at two different test points, X1 and X2; that is, we
want to obtain mean and variance of f(X1) - f(X2). Using this fake
kernel, this can be achieved with
`mean, var = gp.predict(np.vstack((X1, X2)), kern=DiffKernel(gp.kern))`
Parameters
----------
kernel: GPy.kern.*
The kernel used by the GP
"""
def __init__(self, kernel):
self.kern = kernel
def K(self, x1, x2=None):
"""Equivalent of kern.K
If only x1 is passed then it is assumed to contain the data for both points
whose differences we are computing. Otherwise, x2 will contain these
extended states (see PosteriorExact._raw_predict in
GPy/inference/latent_function_inference0/posterior.py)
Parameters
----------
x1: np.array
x2: np.array
"""
dim = self.kern.input_dim
if x2 is None:
x10 = x1[:, :dim]
x11 = x1[:, dim:]
return (self.kern.K(x10) + self.kern.K(x11) -
2 * self.kern.K(x10, x11))
else:
x20 = x2[:, :dim]
x21 = x2[:, dim:]
return self.kern.K(x1, x20) - self.kern.K(x1, x21)
def Kdiag(self, x):
"""Equivalent of kern.Kdiag for the difference prediction.
Parameters
----------
x: np.array
"""
dim = self.kern.input_dim
x0 = x[:, :dim]
x1 = x[:, dim:]
return (self.kern.Kdiag(x0) + self.kern.Kdiag(x1) -
2 * np.diag(self.kern.K(x0, x1)))
|
<commit_before><commit_msg>Add fake kernel to predict function differences using GPy<commit_after>
|
from __future__ import division
import numpy as np
class DiffKernel(object):
"""
A fake kernel that can be used to predict differences between two function values.
Given a gp based on measurements, we aim to predict the difference between
the function values at two different test points, X1 and X2; that is, we
want to obtain mean and variance of f(X1) - f(X2). Using this fake
kernel, this can be achieved with
`mean, var = gp.predict(np.vstack((X1, X2)), kern=DiffKernel(gp.kern))`
Parameters
----------
kernel: GPy.kern.*
The kernel used by the GP
"""
def __init__(self, kernel):
self.kern = kernel
def K(self, x1, x2=None):
"""Equivalent of kern.K
If only x1 is passed then it is assumed to contain the data for both points
whose differences we are computing. Otherwise, x2 will contain these
extended states (see PosteriorExact._raw_predict in
GPy/inference/latent_function_inference0/posterior.py)
Parameters
----------
x1: np.array
x2: np.array
"""
dim = self.kern.input_dim
if x2 is None:
x10 = x1[:, :dim]
x11 = x1[:, dim:]
return (self.kern.K(x10) + self.kern.K(x11) -
2 * self.kern.K(x10, x11))
else:
x20 = x2[:, :dim]
x21 = x2[:, dim:]
return self.kern.K(x1, x20) - self.kern.K(x1, x21)
def Kdiag(self, x):
"""Equivalent of kern.Kdiag for the difference prediction.
Parameters
----------
x: np.array
"""
dim = self.kern.input_dim
x0 = x[:, :dim]
x1 = x[:, dim:]
return (self.kern.Kdiag(x0) + self.kern.Kdiag(x1) -
2 * np.diag(self.kern.K(x0, x1)))
|
Add fake kernel to predict function differences using GPyfrom __future__ import division
import numpy as np
class DiffKernel(object):
"""
A fake kernel that can be used to predict differences between two function values.
Given a gp based on measurements, we aim to predict the difference between
the function values at two different test points, X1 and X2; that is, we
want to obtain mean and variance of f(X1) - f(X2). Using this fake
kernel, this can be achieved with
`mean, var = gp.predict(np.vstack((X1, X2)), kern=DiffKernel(gp.kern))`
Parameters
----------
kernel: GPy.kern.*
The kernel used by the GP
"""
def __init__(self, kernel):
self.kern = kernel
def K(self, x1, x2=None):
"""Equivalent of kern.K
If only x1 is passed then it is assumed to contain the data for both points
whose differences we are computing. Otherwise, x2 will contain these
extended states (see PosteriorExact._raw_predict in
GPy/inference/latent_function_inference0/posterior.py)
Parameters
----------
x1: np.array
x2: np.array
"""
dim = self.kern.input_dim
if x2 is None:
x10 = x1[:, :dim]
x11 = x1[:, dim:]
return (self.kern.K(x10) + self.kern.K(x11) -
2 * self.kern.K(x10, x11))
else:
x20 = x2[:, :dim]
x21 = x2[:, dim:]
return self.kern.K(x1, x20) - self.kern.K(x1, x21)
def Kdiag(self, x):
"""Equivalent of kern.Kdiag for the difference prediction.
Parameters
----------
x: np.array
"""
dim = self.kern.input_dim
x0 = x[:, :dim]
x1 = x[:, dim:]
return (self.kern.Kdiag(x0) + self.kern.Kdiag(x1) -
2 * np.diag(self.kern.K(x0, x1)))
|
<commit_before><commit_msg>Add fake kernel to predict function differences using GPy<commit_after>from __future__ import division
import numpy as np
class DiffKernel(object):
"""
A fake kernel that can be used to predict differences between two function values.
Given a gp based on measurements, we aim to predict the difference between
the function values at two different test points, X1 and X2; that is, we
want to obtain mean and variance of f(X1) - f(X2). Using this fake
kernel, this can be achieved with
`mean, var = gp.predict(np.vstack((X1, X2)), kern=DiffKernel(gp.kern))`
Parameters
----------
kernel: GPy.kern.*
The kernel used by the GP
"""
def __init__(self, kernel):
self.kern = kernel
def K(self, x1, x2=None):
"""Equivalent of kern.K
If only x1 is passed then it is assumed to contain the data for both points
whose differences we are computing. Otherwise, x2 will contain these
extended states (see PosteriorExact._raw_predict in
GPy/inference/latent_function_inference0/posterior.py)
Parameters
----------
x1: np.array
x2: np.array
"""
dim = self.kern.input_dim
if x2 is None:
x10 = x1[:, :dim]
x11 = x1[:, dim:]
return (self.kern.K(x10) + self.kern.K(x11) -
2 * self.kern.K(x10, x11))
else:
x20 = x2[:, :dim]
x21 = x2[:, dim:]
return self.kern.K(x1, x20) - self.kern.K(x1, x21)
def Kdiag(self, x):
"""Equivalent of kern.Kdiag for the difference prediction.
Parameters
----------
x: np.array
"""
dim = self.kern.input_dim
x0 = x[:, :dim]
x1 = x[:, dim:]
return (self.kern.Kdiag(x0) + self.kern.Kdiag(x1) -
2 * np.diag(self.kern.K(x0, x1)))
|
|
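To make the intended use of DiffKernel above concrete, a minimal sketch; it assumes src/ is importable, and ToyRBF is an invented stand-in exposing the small kernel interface (input_dim, K, Kdiag) that DiffKernel relies on, in place of a real GPy kernel:

    import numpy as np
    from utilities import DiffKernel   # assumes src/ is on sys.path

    class ToyRBF(object):
        """Invented stand-in with the interface DiffKernel expects."""
        input_dim = 1
        def K(self, a, b=None):
            b = a if b is None else b
            return np.exp(-0.5 * (a[:, :1] - b[:, :1].T) ** 2)
        def Kdiag(self, a):
            return np.ones(a.shape[0])

    diff_kern = DiffKernel(ToyRBF())
    x_pair = np.array([[0.1, 0.4]])   # one row carries both test points; K/Kdiag split by columns
    print(diff_kern.Kdiag(x_pair))    # prior variance of f(0.1) - f(0.4) under the toy kernel

With a fitted GPy model, the same object would be passed as the kern argument of gp.predict, as the class docstring describes.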
d82546916e00410565cd919d173ef7359e08ef71
|
test/test_cfg.py
|
test/test_cfg.py
|
import locale
import unittest
from datetime import datetime
from cfg import parse_cfg_date, _fix_locale, pretty_date
class TestDateHandling(unittest.TestCase):
def test_date_parsing(self):
test_date = "2015-01-02 18:06"
parsed = parse_cfg_date(test_date)
self.assertEquals(parsed, datetime(2015, 01, 02, 18, 06))
def test_locale_set(self):
oldloc = locale.getlocale(locale.LC_TIME)
locale.setlocale(locale.LC_TIME, "C")
self.assertEquals(locale.getlocale(locale.LC_TIME)[0], None)
with _fix_locale("de_DE"):
self.assertEquals(locale.getlocale(locale.LC_TIME)[0], "de_DE")
self.assertEquals(locale.getlocale(locale.LC_TIME)[0], None)
def test_date_format(self):
date = datetime(2015, 10, 17, 18, 06)
self.assertEquals(pretty_date(date), "17.10.")
self.assertEquals(pretty_date(date, show_year=True), "17.10.2015")
self.assertEquals(pretty_date(date, month_name=True), "17. Oktober")
self.assertEquals(pretty_date(date, with_weekday=True), "Samstag, den 17.10.")
self.assertEquals(pretty_date(date, show_year=True, month_name=True),
"17. Oktober 2015")
self.assertEquals(pretty_date(date, show_year=True, month_name=True, with_weekday=True),
"Samstag, den 17. Oktober 2015")
|
Add unittests for date stuff.
|
Add unittests for date stuff.
|
Python
|
bsd-3-clause
|
janLo/meet-and-eat-registration-system,eXma/meet-and-eat-registration-system,janLo/meet-and-eat-registration-system,eXma/meet-and-eat-registration-system,eXma/meet-and-eat-registration-system,eXma/meet-and-eat-registration-system,janLo/meet-and-eat-registration-system,janLo/meet-and-eat-registration-system
|
Add unittests for date stuff.
|
import locale
import unittest
from datetime import datetime
from cfg import parse_cfg_date, _fix_locale, pretty_date
class TestDateHandling(unittest.TestCase):
def test_date_parsing(self):
test_date = "2015-01-02 18:06"
parsed = parse_cfg_date(test_date)
self.assertEquals(parsed, datetime(2015, 01, 02, 18, 06))
def test_locale_set(self):
oldloc = locale.getlocale(locale.LC_TIME)
locale.setlocale(locale.LC_TIME, "C")
self.assertEquals(locale.getlocale(locale.LC_TIME)[0], None)
with _fix_locale("de_DE"):
self.assertEquals(locale.getlocale(locale.LC_TIME)[0], "de_DE")
self.assertEquals(locale.getlocale(locale.LC_TIME)[0], None)
def test_date_format(self):
date = datetime(2015, 10, 17, 18, 06)
self.assertEquals(pretty_date(date), "17.10.")
self.assertEquals(pretty_date(date, show_year=True), "17.10.2015")
self.assertEquals(pretty_date(date, month_name=True), "17. Oktober")
self.assertEquals(pretty_date(date, with_weekday=True), "Samstag, den 17.10.")
self.assertEquals(pretty_date(date, show_year=True, month_name=True),
"17. Oktober 2015")
self.assertEquals(pretty_date(date, show_year=True, month_name=True, with_weekday=True),
"Samstag, den 17. Oktober 2015")
|
<commit_before><commit_msg>Add unittests for date stuff.<commit_after>
|
import locale
import unittest
from datetime import datetime
from cfg import parse_cfg_date, _fix_locale, pretty_date
class TestDateHandling(unittest.TestCase):
def test_date_parsing(self):
test_date = "2015-01-02 18:06"
parsed = parse_cfg_date(test_date)
self.assertEquals(parsed, datetime(2015, 01, 02, 18, 06))
def test_locale_set(self):
oldloc = locale.getlocale(locale.LC_TIME)
locale.setlocale(locale.LC_TIME, "C")
self.assertEquals(locale.getlocale(locale.LC_TIME)[0], None)
with _fix_locale("de_DE"):
self.assertEquals(locale.getlocale(locale.LC_TIME)[0], "de_DE")
self.assertEquals(locale.getlocale(locale.LC_TIME)[0], None)
def test_date_format(self):
date = datetime(2015, 10, 17, 18, 06)
self.assertEquals(pretty_date(date), "17.10.")
self.assertEquals(pretty_date(date, show_year=True), "17.10.2015")
self.assertEquals(pretty_date(date, month_name=True), "17. Oktober")
self.assertEquals(pretty_date(date, with_weekday=True), "Samstag, den 17.10.")
self.assertEquals(pretty_date(date, show_year=True, month_name=True),
"17. Oktober 2015")
self.assertEquals(pretty_date(date, show_year=True, month_name=True, with_weekday=True),
"Samstag, den 17. Oktober 2015")
|
Add unittests for date stuff.import locale
import unittest
from datetime import datetime
from cfg import parse_cfg_date, _fix_locale, pretty_date
class TestDateHandling(unittest.TestCase):
def test_date_parsing(self):
test_date = "2015-01-02 18:06"
parsed = parse_cfg_date(test_date)
self.assertEquals(parsed, datetime(2015, 01, 02, 18, 06))
def test_locale_set(self):
oldloc = locale.getlocale(locale.LC_TIME)
locale.setlocale(locale.LC_TIME, "C")
self.assertEquals(locale.getlocale(locale.LC_TIME)[0], None)
with _fix_locale("de_DE"):
self.assertEquals(locale.getlocale(locale.LC_TIME)[0], "de_DE")
self.assertEquals(locale.getlocale(locale.LC_TIME)[0], None)
def test_date_format(self):
date = datetime(2015, 10, 17, 18, 06)
self.assertEquals(pretty_date(date), "17.10.")
self.assertEquals(pretty_date(date, show_year=True), "17.10.2015")
self.assertEquals(pretty_date(date, month_name=True), "17. Oktober")
self.assertEquals(pretty_date(date, with_weekday=True), "Samstag, den 17.10.")
self.assertEquals(pretty_date(date, show_year=True, month_name=True),
"17. Oktober 2015")
self.assertEquals(pretty_date(date, show_year=True, month_name=True, with_weekday=True),
"Samstag, den 17. Oktober 2015")
|
<commit_before><commit_msg>Add unittests for date stuff.<commit_after>import locale
import unittest
from datetime import datetime
from cfg import parse_cfg_date, _fix_locale, pretty_date
class TestDateHandling(unittest.TestCase):
def test_date_parsing(self):
test_date = "2015-01-02 18:06"
parsed = parse_cfg_date(test_date)
self.assertEquals(parsed, datetime(2015, 01, 02, 18, 06))
def test_locale_set(self):
oldloc = locale.getlocale(locale.LC_TIME)
locale.setlocale(locale.LC_TIME, "C")
self.assertEquals(locale.getlocale(locale.LC_TIME)[0], None)
with _fix_locale("de_DE"):
self.assertEquals(locale.getlocale(locale.LC_TIME)[0], "de_DE")
self.assertEquals(locale.getlocale(locale.LC_TIME)[0], None)
def test_date_format(self):
date = datetime(2015, 10, 17, 18, 06)
self.assertEquals(pretty_date(date), "17.10.")
self.assertEquals(pretty_date(date, show_year=True), "17.10.2015")
self.assertEquals(pretty_date(date, month_name=True), "17. Oktober")
self.assertEquals(pretty_date(date, with_weekday=True), "Samstag, den 17.10.")
self.assertEquals(pretty_date(date, show_year=True, month_name=True),
"17. Oktober 2015")
self.assertEquals(pretty_date(date, show_year=True, month_name=True, with_weekday=True),
"Samstag, den 17. Oktober 2015")
|
|
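The _fix_locale helper exercised above is not shown in this record; as a rough sketch of what the test implies it does (temporarily switching LC_TIME and restoring it afterwards), written purely for illustration and not taken from the cfg module:

    import locale
    from contextlib import contextmanager

    @contextmanager
    def _fix_locale(name):
        previous = locale.setlocale(locale.LC_TIME)     # remember the current LC_TIME setting
        locale.setlocale(locale.LC_TIME, name)          # e.g. "de_DE" for German month names
        try:
            yield
        finally:
            locale.setlocale(locale.LC_TIME, previous)  # restore, as test_locale_set expects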
064578b33fd4a58ce427dd4547deb3859ab56ebb
|
corehq/blobs/migrations/0007_blobmeta_migrated_check.py
|
corehq/blobs/migrations/0007_blobmeta_migrated_check.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2019-01-05 00:21
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import print_function
import sys
import traceback
from django.core.management import call_command
from django.db import connections, migrations
from corehq.sql_db.operations import HqRunPython, noop_migration_fn
from corehq.sql_db.util import get_db_aliases_for_partitioned_query
BLOBMETAS_NOT_MIGRATED_ERROR = """
Blob metadata needs to be migrated before this environment can be upgraded to
the latest version of CommCareHQ. Instructions for running the migration can be
found at this link:
https://github.com/dimagi/commcare-cloud/blob/master/docs/changelog/0009-blob-metadata-part-2.md
If you are unable to run the management command because it has been deleted,
you will need to checkout an older version of CommCareHQ first:
git checkout acc40116a96a40c64efb8613fb2ba5933122b151
"""
def get_num_attachments():
"""Get the number of attachments that need to be migrated"""
def count(dbname):
with connections[dbname].cursor() as cursor:
cursor.execute("SELECT COUNT(*) FROM form_processor_xformattachmentsql")
return cursor.fetchone()[0]
return sum(count(db) for db in get_db_aliases_for_partitioned_query())
def _assert_blobmetas_migrated(apps, schema_editor):
"""Check if migrated. Raises SystemExit if not migrated"""
num_attachments = get_num_attachments()
migrated = num_attachments == 0
if migrated:
return
if num_attachments < 10000:
try:
call_command(
"run_sql",
"simple_move_form_attachments_to_blobmeta",
yes=True,
)
migrated = get_num_attachments() == 0
if not migrated:
print("Automatic migration failed")
migrated = False
except Exception:
traceback.print_exc()
else:
print("Found %s attachments." % num_attachments)
print("Too many to migrate automatically.")
if not migrated:
print("")
print(BLOBMETAS_NOT_MIGRATED_ERROR)
sys.exit(1)
class Migration(migrations.Migration):
dependencies = [
('blobs', '0006_restrict_form_attachments'),
]
operations = [
HqRunPython(_assert_blobmetas_migrated, noop_migration_fn)
]
|
Add blob metadata migration check
|
Add blob metadata migration check
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add blob metadata migration check
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2019-01-05 00:21
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import print_function
import sys
import traceback
from django.core.management import call_command
from django.db import connections, migrations
from corehq.sql_db.operations import HqRunPython, noop_migration_fn
from corehq.sql_db.util import get_db_aliases_for_partitioned_query
BLOBMETAS_NOT_MIGRATED_ERROR = """
Blob metadata needs to be migrated before this environment can be upgraded to
the latest version of CommCareHQ. Instructions for running the migration can be
found at this link:
https://github.com/dimagi/commcare-cloud/blob/master/docs/changelog/0009-blob-metadata-part-2.md
If you are unable to run the management command because it has been deleted,
you will need to checkout an older version of CommCareHQ first:
git checkout acc40116a96a40c64efb8613fb2ba5933122b151
"""
def get_num_attachments():
"""Get the number of attachments that need to be migrated"""
def count(dbname):
with connections[dbname].cursor() as cursor:
cursor.execute("SELECT COUNT(*) FROM form_processor_xformattachmentsql")
return cursor.fetchone()[0]
return sum(count(db) for db in get_db_aliases_for_partitioned_query())
def _assert_blobmetas_migrated(apps, schema_editor):
"""Check if migrated. Raises SystemExit if not migrated"""
num_attachments = get_num_attachments()
migrated = num_attachments == 0
if migrated:
return
if num_attachments < 10000:
try:
call_command(
"run_sql",
"simple_move_form_attachments_to_blobmeta",
yes=True,
)
migrated = get_num_attachments() == 0
if not migrated:
print("Automatic migration failed")
migrated = False
except Exception:
traceback.print_exc()
else:
print("Found %s attachments." % num_attachments)
print("Too many to migrate automatically.")
if not migrated:
print("")
print(BLOBMETAS_NOT_MIGRATED_ERROR)
sys.exit(1)
class Migration(migrations.Migration):
dependencies = [
('blobs', '0006_restrict_form_attachments'),
]
operations = [
HqRunPython(_assert_blobmetas_migrated, noop_migration_fn)
]
|
<commit_before><commit_msg>Add blob metadata migration check<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2019-01-05 00:21
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import print_function
import sys
import traceback
from django.core.management import call_command
from django.db import connections, migrations
from corehq.sql_db.operations import HqRunPython, noop_migration_fn
from corehq.sql_db.util import get_db_aliases_for_partitioned_query
BLOBMETAS_NOT_MIGRATED_ERROR = """
Blob metadata needs to be migrated before this environment can be upgraded to
the latest version of CommCareHQ. Instructions for running the migration can be
found at this link:
https://github.com/dimagi/commcare-cloud/blob/master/docs/changelog/0009-blob-metadata-part-2.md
If you are unable to run the management command because it has been deleted,
you will need to checkout an older version of CommCareHQ first:
git checkout acc40116a96a40c64efb8613fb2ba5933122b151
"""
def get_num_attachments():
"""Get the number of attachments that need to be migrated"""
def count(dbname):
with connections[dbname].cursor() as cursor:
cursor.execute("SELECT COUNT(*) FROM form_processor_xformattachmentsql")
return cursor.fetchone()[0]
return sum(count(db) for db in get_db_aliases_for_partitioned_query())
def _assert_blobmetas_migrated(apps, schema_editor):
"""Check if migrated. Raises SystemExit if not migrated"""
num_attachments = get_num_attachments()
migrated = num_attachments == 0
if migrated:
return
if num_attachments < 10000:
try:
call_command(
"run_sql",
"simple_move_form_attachments_to_blobmeta",
yes=True,
)
migrated = get_num_attachments() == 0
if not migrated:
print("Automatic migration failed")
migrated = False
except Exception:
traceback.print_exc()
else:
print("Found %s attachments." % num_attachments)
print("Too many to migrate automatically.")
if not migrated:
print("")
print(BLOBMETAS_NOT_MIGRATED_ERROR)
sys.exit(1)
class Migration(migrations.Migration):
dependencies = [
('blobs', '0006_restrict_form_attachments'),
]
operations = [
HqRunPython(_assert_blobmetas_migrated, noop_migration_fn)
]
|
Add blob metadata migration check# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2019-01-05 00:21
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import print_function
import sys
import traceback
from django.core.management import call_command
from django.db import connections, migrations
from corehq.sql_db.operations import HqRunPython, noop_migration_fn
from corehq.sql_db.util import get_db_aliases_for_partitioned_query
BLOBMETAS_NOT_MIGRATED_ERROR = """
Blob metadata needs to be migrated before this environment can be upgraded to
the latest version of CommCareHQ. Instructions for running the migration can be
found at this link:
https://github.com/dimagi/commcare-cloud/blob/master/docs/changelog/0009-blob-metadata-part-2.md
If you are unable to run the management command because it has been deleted,
you will need to checkout an older version of CommCareHQ first:
git checkout acc40116a96a40c64efb8613fb2ba5933122b151
"""
def get_num_attachments():
"""Get the number of attachments that need to be migrated"""
def count(dbname):
with connections[dbname].cursor() as cursor:
cursor.execute("SELECT COUNT(*) FROM form_processor_xformattachmentsql")
return cursor.fetchone()[0]
return sum(count(db) for db in get_db_aliases_for_partitioned_query())
def _assert_blobmetas_migrated(apps, schema_editor):
"""Check if migrated. Raises SystemExit if not migrated"""
num_attachments = get_num_attachments()
migrated = num_attachments == 0
if migrated:
return
if num_attachments < 10000:
try:
call_command(
"run_sql",
"simple_move_form_attachments_to_blobmeta",
yes=True,
)
migrated = get_num_attachments() == 0
if not migrated:
print("Automatic migration failed")
migrated = False
except Exception:
traceback.print_exc()
else:
print("Found %s attachments." % num_attachments)
print("Too many to migrate automatically.")
if not migrated:
print("")
print(BLOBMETAS_NOT_MIGRATED_ERROR)
sys.exit(1)
class Migration(migrations.Migration):
dependencies = [
('blobs', '0006_restrict_form_attachments'),
]
operations = [
HqRunPython(_assert_blobmetas_migrated, noop_migration_fn)
]
|
<commit_before><commit_msg>Add blob metadata migration check<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2019-01-05 00:21
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import print_function
import sys
import traceback
from django.core.management import call_command
from django.db import connections, migrations
from corehq.sql_db.operations import HqRunPython, noop_migration_fn
from corehq.sql_db.util import get_db_aliases_for_partitioned_query
BLOBMETAS_NOT_MIGRATED_ERROR = """
Blob metadata needs to be migrated before this environment can be upgraded to
the latest version of CommCareHQ. Instructions for running the migration can be
found at this link:
https://github.com/dimagi/commcare-cloud/blob/master/docs/changelog/0009-blob-metadata-part-2.md
If you are unable to run the management command because it has been deleted,
you will need to checkout an older version of CommCareHQ first:
git checkout acc40116a96a40c64efb8613fb2ba5933122b151
"""
def get_num_attachments():
"""Get the number of attachments that need to be migrated"""
def count(dbname):
with connections[dbname].cursor() as cursor:
cursor.execute("SELECT COUNT(*) FROM form_processor_xformattachmentsql")
return cursor.fetchone()[0]
return sum(count(db) for db in get_db_aliases_for_partitioned_query())
def _assert_blobmetas_migrated(apps, schema_editor):
"""Check if migrated. Raises SystemExit if not migrated"""
num_attachments = get_num_attachments()
migrated = num_attachments == 0
if migrated:
return
if num_attachments < 10000:
try:
call_command(
"run_sql",
"simple_move_form_attachments_to_blobmeta",
yes=True,
)
migrated = get_num_attachments() == 0
if not migrated:
print("Automatic migration failed")
migrated = False
except Exception:
traceback.print_exc()
else:
print("Found %s attachments." % num_attachments)
print("Too many to migrate automatically.")
if not migrated:
print("")
print(BLOBMETAS_NOT_MIGRATED_ERROR)
sys.exit(1)
class Migration(migrations.Migration):
dependencies = [
('blobs', '0006_restrict_form_attachments'),
]
operations = [
HqRunPython(_assert_blobmetas_migrated, noop_migration_fn)
]
|
|
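On the migration mechanics above: HqRunPython and noop_migration_fn are corehq helpers not shown in this record; the shape appears to be the usual Django RunPython pairing of a forward callable with a do-nothing reverse, roughly as in this generic sketch (names invented):

    from django.db import migrations

    def forwards_check(apps, schema_editor):
        pass  # forward step: run a data check, abort the upgrade if it fails

    def noop(apps, schema_editor):
        pass  # reverse step: nothing to undo

    class Migration(migrations.Migration):
        dependencies = []
        operations = [migrations.RunPython(forwards_check, noop)]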
7397c321644c6f14dbb69c2d082d19fc977625e8
|
waffle/migrations/0002_auto_20161201_0958.py
|
waffle/migrations/0002_auto_20161201_0958.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-01 09:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('waffle', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='switch',
name='active',
field=models.BooleanField(default=False, help_text='Is this switch active?'),
),
]
|
Add missing migration for switch model
|
Add missing migration for switch model
|
Python
|
bsd-3-clause
|
rsalmaso/django-waffle,rodgomes/django-waffle,willkg/django-waffle,rsalmaso/django-waffle,rodgomes/django-waffle,willkg/django-waffle,rodgomes/django-waffle,rsalmaso/django-waffle,rsalmaso/django-waffle,rodgomes/django-waffle
|
Add missing migration for switch model
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-01 09:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('waffle', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='switch',
name='active',
field=models.BooleanField(default=False, help_text='Is this switch active?'),
),
]
|
<commit_before><commit_msg>Add missing migration for switch model<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-01 09:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('waffle', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='switch',
name='active',
field=models.BooleanField(default=False, help_text='Is this switch active?'),
),
]
|
Add missing migration for switch model# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-01 09:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('waffle', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='switch',
name='active',
field=models.BooleanField(default=False, help_text='Is this switch active?'),
),
]
|
<commit_before><commit_msg>Add missing migration for switch model<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-01 09:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('waffle', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='switch',
name='active',
field=models.BooleanField(default=False, help_text='Is this switch active?'),
),
]
|
|
f5ab12540c21fdfe5497b16d3f05c57fb021fbc9
|
misc/bad_points.py
|
misc/bad_points.py
|
from laspy.file import File
import numpy as np
import argparse
parser = argparse.ArgumentParser(description="""Accept the path to a .LAS file,
and print a list of point records
with invalid (X,Y,Z) information.""")
parser.add_argument("in_file", metavar="Input File", type = str, nargs=1, help = "Path to input file")
args = parser.parse_args()
inFile = File(args.in_file[0], mode = "r")
X_invalid = np.logical_or((inFile.header.min[0] > inFile.x), (inFile.header.max[0] < inFile.x))
Y_invalid = np.logical_or((inFile.header.min[1] > inFile.y), (inFile.header.max[1] < inFile.y))
Z_invalid = np.logical_or((inFile.header.min[2] > inFile.z), (inFile.header.max[2] < inFile.z))
bad_indices = np.where(np.logical_or(np.logical_or(X_invalid, Y_invalid), Z_invalid))
print("Array bad_indices: ")
print(bad_indices)
|
Add script to find bad point indices.
|
Add script to find bad point indices.
|
Python
|
bsd-2-clause
|
silyko/laspy,blazbratanic/laspy,blazbratanic/laspy,silyko/laspy
|
Add script to find bad point indices.
|
from laspy.file import File
import numpy as np
import argparse
parser = argparse.ArgumentParser(description="""Accept the path to a .LAS file,
and print a list of point records
with invalid (X,Y,Z) information.""")
parser.add_argument("in_file", metavar="Input File", type = str, nargs=1, help = "Path to input file")
args = parser.parse_args()
inFile = File(args.in_file[0], mode = "r")
X_invalid = np.logical_or((inFile.header.min[0] > inFile.x), (inFile.header.max[0] < inFile.x))
Y_invalid = np.logical_or((inFile.header.min[1] > inFile.y), (inFile.header.max[1] < inFile.y))
Z_invalid = np.logical_or((inFile.header.min[2] > inFile.z), (inFile.header.max[2] < inFile.z))
bad_indices = np.where(np.logical_or(np.logical_or(X_invalid, Y_invalid), Z_invalid))
print("Array bad_indices: ")
print(bad_indices)
|
<commit_before><commit_msg>Add script to find bad point indices.<commit_after>
|
from laspy.file import File
import numpy as np
import argparse
parser = argparse.ArgumentParser(description="""Accept the path to a .LAS file,
and print a list of point records
with invalid (X,Y,Z) information.""")
parser.add_argument("in_file", metavar="Input File", type = str, nargs=1, help = "Path to input file")
args = parser.parse_args()
inFile = File(args.in_file[0], mode = "r")
X_invalid = np.logical_or((inFile.header.min[0] > inFile.x), (inFile.header.max[0] < inFile.x))
Y_invalid = np.logical_or((inFile.header.min[1] > inFile.y), (inFile.header.max[1] < inFile.y))
Z_invalid = np.logical_or((inFile.header.min[2] > inFile.z), (inFile.header.max[2] < inFile.z))
bad_indices = np.where(np.logical_or(np.logical_or(X_invalid, Y_invalid), Z_invalid))
print("Array bad_indices: ")
print(bad_indices)
|
Add script to find bad point indices.from laspy.file import File
import numpy as np
import argparse
parser = argparse.ArgumentParser(description="""Accept the path to a .LAS file,
and print a list of point records
with invalid (X,Y,Z) information.""")
parser.add_argument("in_file", metavar="Input File", type = str, nargs=1, help = "Path to input file")
args = parser.parse_args()
inFile = File(args.in_file[0], mode = "r")
X_invalid = np.logical_or((inFile.header.min[0] > inFile.x), (inFile.header.max[0] < inFile.x))
Y_invalid = np.logical_or((inFile.header.min[1] > inFile.y), (inFile.header.max[1] < inFile.y))
Z_invalid = np.logical_or((inFile.header.min[2] > inFile.z), (inFile.header.max[2] < inFile.z))
bad_indices = np.where(np.logical_or(np.logical_or(X_invalid, Y_invalid), Z_invalid))
print("Array bad_indices: ")
print(bad_indices)
|
<commit_before><commit_msg>Add script to find bad point indices.<commit_after>from laspy.file import File
import numpy as np
import argparse
parser = argparse.ArgumentParser(description="""Accept the path to a .LAS file,
and print a list of point records
with invalid (X,Y,Z) information.""")
parser.add_argument("in_file", metavar="Input File", type = str, nargs=1, help = "Path to input file")
args = parser.parse_args()
inFile = File(args.in_file[0], mode = "r")
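# A point is invalid if any scaled coordinate lies outside the min/max bounding box recorded in the header.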
X_invalid = np.logical_or((inFile.header.min[0] > inFile.x), (inFile.header.max[0] < inFile.x))
Y_invalid = np.logical_or((inFile.header.min[1] > inFile.y), (inFile.header.max[1] < inFile.y))
Z_invalid = np.logical_or((inFile.header.min[2] > inFile.z), (inFile.header.max[2] < inFile.z))
bad_indices = np.where(np.logical_or(np.logical_or(X_invalid, Y_invalid), Z_invalid))
print("Array bad_indices: ")
print(bad_indices)
|
|
ef8f941b2b2dc082c7c47a9e304ff0d44173629a
|
src/sentry/api/decorators.py
|
src/sentry/api/decorators.py
|
from __future__ import absolute_import
import json
from django.http import HttpResponse
from functools import wraps
from sentry.models import ApiKey
def is_considered_sudo(request):
return request.is_sudo() or \
isinstance(request.auth, ApiKey)
def sudo_required(func):
@wraps(func)
def wrapped(self, request, *args, **kwargs):
# If we are already authenticated through an API key we do not
# care about the sudo flag.
if not is_considered_sudo(request):
# TODO(dcramer): support some kind of auth flow to allow this
# externally
data = {
"error": "Account verification required.",
"sudoRequired": True,
"username": request.user.username,
}
return HttpResponse(json.dumps(data), status=401)
return func(self, request, *args, **kwargs)
return wrapped
|
from __future__ import absolute_import
import json
from django.http import HttpResponse
from functools import wraps
from sentry.models import ApiKey, ApiToken
def is_considered_sudo(request):
return request.is_sudo() or \
isinstance(request.auth, ApiKey) or \
isinstance(request.auth, ApiToken)
def sudo_required(func):
@wraps(func)
def wrapped(self, request, *args, **kwargs):
# If we are already authenticated through an API key we do not
# care about the sudo flag.
if not is_considered_sudo(request):
# TODO(dcramer): support some kind of auth flow to allow this
# externally
data = {
"error": "Account verification required.",
"sudoRequired": True,
"username": request.user.username,
}
return HttpResponse(json.dumps(data), status=401)
return func(self, request, *args, **kwargs)
return wrapped
|
Add sudo support for auth tokens
|
Add sudo support for auth tokens
This fixes #3525
|
Python
|
bsd-3-clause
|
fotinakis/sentry,zenefits/sentry,ifduyue/sentry,JamesMura/sentry,beeftornado/sentry,jean/sentry,BuildingLink/sentry,zenefits/sentry,ifduyue/sentry,JackDanger/sentry,mvaled/sentry,BuildingLink/sentry,fotinakis/sentry,JackDanger/sentry,looker/sentry,alexm92/sentry,mitsuhiko/sentry,looker/sentry,gencer/sentry,beeftornado/sentry,gencer/sentry,mvaled/sentry,JamesMura/sentry,gencer/sentry,jean/sentry,ifduyue/sentry,jean/sentry,fotinakis/sentry,JamesMura/sentry,ifduyue/sentry,fotinakis/sentry,looker/sentry,BuildingLink/sentry,JamesMura/sentry,mvaled/sentry,gencer/sentry,ifduyue/sentry,beeftornado/sentry,JackDanger/sentry,zenefits/sentry,looker/sentry,jean/sentry,mvaled/sentry,mitsuhiko/sentry,looker/sentry,zenefits/sentry,gencer/sentry,alexm92/sentry,mvaled/sentry,alexm92/sentry,mvaled/sentry,JamesMura/sentry,BuildingLink/sentry,zenefits/sentry,jean/sentry,BuildingLink/sentry
|
from __future__ import absolute_import
import json
from django.http import HttpResponse
from functools import wraps
from sentry.models import ApiKey
def is_considered_sudo(request):
return request.is_sudo() or \
isinstance(request.auth, ApiKey)
def sudo_required(func):
@wraps(func)
def wrapped(self, request, *args, **kwargs):
# If we are already authenticated through an API key we do not
# care about the sudo flag.
if not is_considered_sudo(request):
# TODO(dcramer): support some kind of auth flow to allow this
# externally
data = {
"error": "Account verification required.",
"sudoRequired": True,
"username": request.user.username,
}
return HttpResponse(json.dumps(data), status=401)
return func(self, request, *args, **kwargs)
return wrapped
Add sudo support for auth tokens
This fixes #3525
|
from __future__ import absolute_import
import json
from django.http import HttpResponse
from functools import wraps
from sentry.models import ApiKey, ApiToken
def is_considered_sudo(request):
return request.is_sudo() or \
isinstance(request.auth, ApiKey) or \
isinstance(request.auth, ApiToken)
def sudo_required(func):
@wraps(func)
def wrapped(self, request, *args, **kwargs):
# If we are already authenticated through an API key we do not
# care about the sudo flag.
if not is_considered_sudo(request):
# TODO(dcramer): support some kind of auth flow to allow this
# externally
data = {
"error": "Account verification required.",
"sudoRequired": True,
"username": request.user.username,
}
return HttpResponse(json.dumps(data), status=401)
return func(self, request, *args, **kwargs)
return wrapped
|
<commit_before>from __future__ import absolute_import
import json
from django.http import HttpResponse
from functools import wraps
from sentry.models import ApiKey
def is_considered_sudo(request):
return request.is_sudo() or \
isinstance(request.auth, ApiKey)
def sudo_required(func):
@wraps(func)
def wrapped(self, request, *args, **kwargs):
# If we are already authenticated through an API key we do not
# care about the sudo flag.
if not is_considered_sudo(request):
# TODO(dcramer): support some kind of auth flow to allow this
# externally
data = {
"error": "Account verification required.",
"sudoRequired": True,
"username": request.user.username,
}
return HttpResponse(json.dumps(data), status=401)
return func(self, request, *args, **kwargs)
return wrapped
<commit_msg>Add sudo support for auth tokens
This fixes #3525<commit_after>
|
from __future__ import absolute_import
import json
from django.http import HttpResponse
from functools import wraps
from sentry.models import ApiKey, ApiToken
def is_considered_sudo(request):
return request.is_sudo() or \
isinstance(request.auth, ApiKey) or \
isinstance(request.auth, ApiToken)
def sudo_required(func):
@wraps(func)
def wrapped(self, request, *args, **kwargs):
# If we are already authenticated through an API key we do not
# care about the sudo flag.
if not is_considered_sudo(request):
# TODO(dcramer): support some kind of auth flow to allow this
# externally
data = {
"error": "Account verification required.",
"sudoRequired": True,
"username": request.user.username,
}
return HttpResponse(json.dumps(data), status=401)
return func(self, request, *args, **kwargs)
return wrapped
|
from __future__ import absolute_import
import json
from django.http import HttpResponse
from functools import wraps
from sentry.models import ApiKey
def is_considered_sudo(request):
return request.is_sudo() or \
isinstance(request.auth, ApiKey)
def sudo_required(func):
@wraps(func)
def wrapped(self, request, *args, **kwargs):
# If we are already authenticated through an API key we do not
# care about the sudo flag.
if not is_considered_sudo(request):
# TODO(dcramer): support some kind of auth flow to allow this
# externally
data = {
"error": "Account verification required.",
"sudoRequired": True,
"username": request.user.username,
}
return HttpResponse(json.dumps(data), status=401)
return func(self, request, *args, **kwargs)
return wrapped
Add sudo support for auth tokens
This fixes #3525from __future__ import absolute_import
import json
from django.http import HttpResponse
from functools import wraps
from sentry.models import ApiKey, ApiToken
def is_considered_sudo(request):
return request.is_sudo() or \
isinstance(request.auth, ApiKey) or \
isinstance(request.auth, ApiToken)
def sudo_required(func):
@wraps(func)
def wrapped(self, request, *args, **kwargs):
# If we are already authenticated through an API key we do not
# care about the sudo flag.
if not is_considered_sudo(request):
# TODO(dcramer): support some kind of auth flow to allow this
# externally
data = {
"error": "Account verification required.",
"sudoRequired": True,
"username": request.user.username,
}
return HttpResponse(json.dumps(data), status=401)
return func(self, request, *args, **kwargs)
return wrapped
|
<commit_before>from __future__ import absolute_import
import json
from django.http import HttpResponse
from functools import wraps
from sentry.models import ApiKey
def is_considered_sudo(request):
return request.is_sudo() or \
isinstance(request.auth, ApiKey)
def sudo_required(func):
@wraps(func)
def wrapped(self, request, *args, **kwargs):
# If we are already authenticated through an API key we do not
# care about the sudo flag.
if not is_considered_sudo(request):
# TODO(dcramer): support some kind of auth flow to allow this
# externally
data = {
"error": "Account verification required.",
"sudoRequired": True,
"username": request.user.username,
}
return HttpResponse(json.dumps(data), status=401)
return func(self, request, *args, **kwargs)
return wrapped
<commit_msg>Add sudo support for auth tokens
This fixes #3525<commit_after>from __future__ import absolute_import
import json
from django.http import HttpResponse
from functools import wraps
from sentry.models import ApiKey, ApiToken
def is_considered_sudo(request):
return request.is_sudo() or \
isinstance(request.auth, ApiKey) or \
isinstance(request.auth, ApiToken)
def sudo_required(func):
@wraps(func)
def wrapped(self, request, *args, **kwargs):
# If we are already authenticated through an API key we do not
# care about the sudo flag.
if not is_considered_sudo(request):
# TODO(dcramer): support some kind of auth flow to allow this
# externally
data = {
"error": "Account verification required.",
"sudoRequired": True,
"username": request.user.username,
}
return HttpResponse(json.dumps(data), status=401)
return func(self, request, *args, **kwargs)
return wrapped
|
49db72b01d1782a58497fbef53ca018ebeb76170
|
kaggle/titanic/categorical_and_scaler_prediction.py
|
kaggle/titanic/categorical_and_scaler_prediction.py
|
import pandas
def main():
train_all = pandas.DataFrame.from_csv('train.csv')
train = train_all[['Survived', 'Sex', 'Fare']]
print(train)
if __name__ == '__main__':
main()
|
Read in train data to dataframe
|
Read in train data to dataframe
|
Python
|
mit
|
noelevans/sandpit,noelevans/sandpit,noelevans/sandpit,noelevans/sandpit,noelevans/sandpit,noelevans/sandpit
|
Read in train data to dataframe
|
import pandas
def main():
train_all = pandas.DataFrame.from_csv('train.csv')
train = train_all[['Survived', 'Sex', 'Fare']]
print(train)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Read in train data to dataframe<commit_after>
|
import pandas
def main():
train_all = pandas.DataFrame.from_csv('train.csv')
train = train_all[['Survived', 'Sex', 'Fare']]
print(train)
if __name__ == '__main__':
main()
|
Read in train data to dataframeimport pandas
def main():
train_all = pandas.DataFrame.from_csv('train.csv')
train = train_all[['Survived', 'Sex', 'Fare']]
print(train)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Read in train data to dataframe<commit_after>import pandas
def main():
train_all = pandas.DataFrame.from_csv('train.csv')
train = train_all[['Survived', 'Sex', 'Fare']]
print(train)
if __name__ == '__main__':
main()
|
|
cb347b9fdb7b71bd2cc100547bd171336f7ac272
|
delete-sessions.py
|
delete-sessions.py
|
"""Deletes old sessions to clear up space."""
import datetime
import os
import sys
import time
def get_files(path):
"""Returns the absolute paths of all files under a path."""
for walk_information in os.walk("data/sessions/data/container_file/"):
directory_name, _, file_names = walk_information
for file_name in file_names:
full_path = os.path.join(directory_name, file_name)
yield full_path
def sizeof_fmt(num, suffix="B"):
for unit in ["","Ki","Mi","Gi","Ti","Pi","Ei","Zi"]:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, "Yi", suffix)
def main():
"""Main."""
sessions_path = "data/sessions/data/container_file/"
days = 7
cutoff = datetime.datetime.now() - datetime.timedelta(days=days)
print("Calculating size of files older than {} days...".format(days))
bytes_count = 0
for file_path in get_files(sessions_path):
stat = os.stat(file_path)
if datetime.datetime.fromtimestamp(stat.st_mtime) < cutoff:
bytes_count += stat.st_size
print("Files older than {} days comprise {}".format(days, sizeof_fmt(bytes_count)))
wait_seconds = 5
print("Cleaning in {} seconds...".format(wait_seconds))
time.sleep(wait_seconds)
print("Cleaning")
for file_path in get_files(sessions_path):
stat = os.stat(file_path)
if datetime.datetime.fromtimestamp(stat.st_mtime) < cutoff:
os.unlink(file_path)
if __name__ == "__main__":
if sys.version_info.major < 3:
print("Please use Python 3")
sys.exit(1)
main()
|
Add script to delete old sessions
|
Add script to delete old sessions
|
Python
|
mit
|
bskari/park-stamper,bskari/park-stamper,bskari/park-stamper
|
Add script to delete old sessions
|
"""Deletes old sessions to clear up space."""
import datetime
import os
import sys
import time
def get_files(path):
"""Returns the absolute paths of all files under a path."""
for walk_information in os.walk("data/sessions/data/container_file/"):
directory_name, _, file_names = walk_information
for file_name in file_names:
full_path = os.path.join(directory_name, file_name)
yield full_path
def sizeof_fmt(num, suffix="B"):
for unit in ["","Ki","Mi","Gi","Ti","Pi","Ei","Zi"]:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, "Yi", suffix)
def main():
"""Main."""
sessions_path = "data/sessions/data/container_file/"
days = 7
cutoff = datetime.datetime.now() - datetime.timedelta(days=days)
print("Calculating size of files older than {} days...".format(days))
bytes_count = 0
for file_path in get_files(sessions_path):
stat = os.stat(file_path)
if datetime.datetime.fromtimestamp(stat.st_mtime) < cutoff:
bytes_count += stat.st_size
print("Files older than {} days comprise {}".format(days, sizeof_fmt(bytes_count)))
wait_seconds = 5
print("Cleaning in {} seconds...".format(wait_seconds))
time.sleep(wait_seconds)
print("Cleaning")
for file_path in get_files(sessions_path):
stat = os.stat(file_path)
if datetime.datetime.fromtimestamp(stat.st_mtime) < cutoff:
os.unlink(file_path)
if __name__ == "__main__":
if sys.version_info.major < 3:
print("Please use Python 3")
sys.exit(1)
main()
|
<commit_before><commit_msg>Add script to delete old sessions<commit_after>
|
"""Deletes old sessions to clear up space."""
import datetime
import os
import sys
import time
def get_files(path):
"""Returns the absolute paths of all files under a path."""
for walk_information in os.walk("data/sessions/data/container_file/"):
directory_name, _, file_names = walk_information
for file_name in file_names:
full_path = os.path.join(directory_name, file_name)
yield full_path
def sizeof_fmt(num, suffix="B"):
for unit in ["","Ki","Mi","Gi","Ti","Pi","Ei","Zi"]:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, "Yi", suffix)
def main():
"""Main."""
sessions_path = "data/sessions/data/container_file/"
days = 7
cutoff = datetime.datetime.now() - datetime.timedelta(days=days)
print("Calculating size of files older than {} days...".format(days))
bytes_count = 0
for file_path in get_files(sessions_path):
stat = os.stat(file_path)
if datetime.datetime.fromtimestamp(stat.st_mtime) < cutoff:
bytes_count += stat.st_size
print("Files older than {} days comprise {}".format(days, sizeof_fmt(bytes_count)))
wait_seconds = 5
print("Cleaning in {} seconds...".format(wait_seconds))
time.sleep(wait_seconds)
print("Cleaning")
for file_path in get_files(sessions_path):
stat = os.stat(file_path)
if datetime.datetime.fromtimestamp(stat.st_mtime) < cutoff:
os.unlink(file_path)
if __name__ == "__main__":
if sys.version_info.major < 3:
print("Please use Python 3")
sys.exit(1)
main()
|
Add script to delete old sessions"""Deletes old sessions to clear up space."""
import datetime
import os
import sys
import time
def get_files(path):
"""Returns the absolute paths of all files under a path."""
for walk_information in os.walk("data/sessions/data/container_file/"):
directory_name, _, file_names = walk_information
for file_name in file_names:
full_path = os.path.join(directory_name, file_name)
yield full_path
def sizeof_fmt(num, suffix="B"):
for unit in ["","Ki","Mi","Gi","Ti","Pi","Ei","Zi"]:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, "Yi", suffix)
def main():
"""Main."""
sessions_path = "data/sessions/data/container_file/"
days = 7
cutoff = datetime.datetime.now() - datetime.timedelta(days=days)
print("Calculating size of files older than {} days...".format(days))
bytes_count = 0
for file_path in get_files(sessions_path):
stat = os.stat(file_path)
if datetime.datetime.fromtimestamp(stat.st_mtime) < cutoff:
bytes_count += stat.st_size
print("Files older than {} days comprise {}".format(days, sizeof_fmt(bytes_count)))
wait_seconds = 5
print("Cleaning in {} seconds...".format(wait_seconds))
time.sleep(wait_seconds)
print("Cleaning")
for file_path in get_files(sessions_path):
stat = os.stat(file_path)
if datetime.datetime.fromtimestamp(stat.st_mtime) < cutoff:
os.unlink(file_path)
if __name__ == "__main__":
if sys.version_info.major < 3:
print("Please use Python 3")
sys.exit(1)
main()
|
<commit_before><commit_msg>Add script to delete old sessions<commit_after>"""Deletes old sessions to clear up space."""
import datetime
import os
import sys
import time
def get_files(path):
"""Returns the absolute paths of all files under a path."""
for walk_information in os.walk("data/sessions/data/container_file/"):
directory_name, _, file_names = walk_information
for file_name in file_names:
full_path = os.path.join(directory_name, file_name)
yield full_path
def sizeof_fmt(num, suffix="B"):
for unit in ["","Ki","Mi","Gi","Ti","Pi","Ei","Zi"]:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, "Yi", suffix)
def main():
"""Main."""
sessions_path = "data/sessions/data/container_file/"
days = 7
cutoff = datetime.datetime.now() - datetime.timedelta(days=days)
print("Calculating size of files older than {} days...".format(days))
bytes_count = 0
for file_path in get_files(sessions_path):
stat = os.stat(file_path)
if datetime.datetime.fromtimestamp(stat.st_mtime) < cutoff:
bytes_count += stat.st_size
print("Files older than {} days comprise {}".format(days, sizeof_fmt(bytes_count)))
wait_seconds = 5
print("Cleaning in {} seconds...".format(wait_seconds))
time.sleep(wait_seconds)
print("Cleaning")
for file_path in get_files(sessions_path):
stat = os.stat(file_path)
if datetime.datetime.fromtimestamp(stat.st_mtime) < cutoff:
os.unlink(file_path)
if __name__ == "__main__":
if sys.version_info.major < 3:
print("Please use Python 3")
sys.exit(1)
main()
|
|
f89aa0cb546311200514c20f41a60954522d0711
|
mzalendo/kenya/management/commands/kenya_adwords_csv.py
|
mzalendo/kenya/management/commands/kenya_adwords_csv.py
|
import csv
import os
import sys
from optparse import make_option
from core.models import Position
from django.core.management.base import NoArgsCommand
data_directory = os.path.join(sys.path[0], 'kenya', '2013-election-data')
class Command(NoArgsCommand):
    help = 'Generate a CSV file with all candidates for generating Google AdWords'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
)
def handle_noargs(self, **options):
headings = ['FullName', 'MzURL', 'AspirantPosition', 'County', 'Constituency', 'Ward', 'Parties', 'Keywords']
with open('aspirants-county-and-smaller.csv', 'w') as fp:
writer = csv.DictWriter(fp, fieldnames=headings)
writer.writerow(dict((h, h) for h in headings))
for race_type in ('governor',
'senator',
'women-representative',
'mp',
'ward-representative'):
for position in Position.objects.filter(title__slug=('aspirant-' + race_type)).currently_active():
person = position.person
full_names = [person.legal_name]
full_names += [an.alternative_name for an in person.alternative_names.all()]
for full_name in set(full_names):
row = {'FullName': full_name,
'MzURL': 'http://info.mzalendo.com' + person.get_absolute_url(),
'AspirantPosition': race_type}
place = position.place
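                        # Fill the place columns by walking up the hierarchy: ward -> constituency -> county.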
if place.kind.slug == 'ward':
row['Ward'] = place.name
row['Constituency'] = place.parent_place.name
row['County'] = place.parent_place.parent_place.name
elif place.kind.slug == 'constituency':
row['Constituency'] = place.name
row['County'] = place.parent_place.name
elif place.kind.slug == 'county':
row['County'] = place.name
else:
raise Exception, "Unknown place: %s" % (place)
row['Parties'] = ", ".join(p.name.strip() for p in person.parties())
for key, value in row.items():
row[key] = unicode(value).encode('utf-8')
writer.writerow(row)
|
Add a command to generate a CSV file of candidates for Google AdWords
|
Add a command to generate a CSV file of candidates for Google AdWords
This is for issue #585 - as I understand it, this wouldn't
be used directly but is helpful for creating AdWords en masse.
|
Python
|
agpl-3.0
|
geoffkilpin/pombola,ken-muturi/pombola,hzj123/56th,ken-muturi/pombola,hzj123/56th,patricmutwiri/pombola,geoffkilpin/pombola,mysociety/pombola,hzj123/56th,mysociety/pombola,geoffkilpin/pombola,geoffkilpin/pombola,geoffkilpin/pombola,mysociety/pombola,ken-muturi/pombola,hzj123/56th,patricmutwiri/pombola,mysociety/pombola,patricmutwiri/pombola,ken-muturi/pombola,hzj123/56th,hzj123/56th,ken-muturi/pombola,geoffkilpin/pombola,patricmutwiri/pombola,patricmutwiri/pombola,ken-muturi/pombola,mysociety/pombola,mysociety/pombola,patricmutwiri/pombola
|
Add a command to generate a CSV file of candidates for Google AdWords
This is for issue #585 - as I understand it, this wouldn't
be used directly but is helpful for creating AdWords en masse.
|
import csv
import os
import sys
from optparse import make_option
from core.models import Position
from django.core.management.base import NoArgsCommand
data_directory = os.path.join(sys.path[0], 'kenya', '2013-election-data')
class Command(NoArgsCommand):
    help = 'Generate a CSV file with all candidates for generating Google AdWords'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
)
def handle_noargs(self, **options):
headings = ['FullName', 'MzURL', 'AspirantPosition', 'County', 'Constituency', 'Ward', 'Parties', 'Keywords']
with open('aspirants-county-and-smaller.csv', 'w') as fp:
writer = csv.DictWriter(fp, fieldnames=headings)
writer.writerow(dict((h, h) for h in headings))
for race_type in ('governor',
'senator',
'women-representative',
'mp',
'ward-representative'):
for position in Position.objects.filter(title__slug=('aspirant-' + race_type)).currently_active():
person = position.person
full_names = [person.legal_name]
full_names += [an.alternative_name for an in person.alternative_names.all()]
for full_name in set(full_names):
row = {'FullName': full_name,
'MzURL': 'http://info.mzalendo.com' + person.get_absolute_url(),
'AspirantPosition': race_type}
place = position.place
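                        # Fill the place columns by walking up the hierarchy: ward -> constituency -> county.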
if place.kind.slug == 'ward':
row['Ward'] = place.name
row['Constituency'] = place.parent_place.name
row['County'] = place.parent_place.parent_place.name
elif place.kind.slug == 'constituency':
row['Constituency'] = place.name
row['County'] = place.parent_place.name
elif place.kind.slug == 'county':
row['County'] = place.name
else:
raise Exception, "Unknown place: %s" % (place)
row['Parties'] = ", ".join(p.name.strip() for p in person.parties())
for key, value in row.items():
row[key] = unicode(value).encode('utf-8')
writer.writerow(row)
|
<commit_before><commit_msg>Add a command to generate a CSV file of candidates for Google AdWords
This is for issue #585 - as I understand it, this wouldn't
be used directly but is helpful for creating AdWords en masse.<commit_after>
|
import csv
import os
import sys
from optparse import make_option
from core.models import Position
from django.core.management.base import NoArgsCommand
data_directory = os.path.join(sys.path[0], 'kenya', '2013-election-data')
class Command(NoArgsCommand):
    help = 'Generate a CSV file with all candidates for generating Google AdWords'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
)
def handle_noargs(self, **options):
headings = ['FullName', 'MzURL', 'AspirantPosition', 'County', 'Constituency', 'Ward', 'Parties', 'Keywords']
with open('aspirants-county-and-smaller.csv', 'w') as fp:
writer = csv.DictWriter(fp, fieldnames=headings)
writer.writerow(dict((h, h) for h in headings))
for race_type in ('governor',
'senator',
'women-representative',
'mp',
'ward-representative'):
for position in Position.objects.filter(title__slug=('aspirant-' + race_type)).currently_active():
person = position.person
full_names = [person.legal_name]
full_names += [an.alternative_name for an in person.alternative_names.all()]
for full_name in set(full_names):
row = {'FullName': full_name,
'MzURL': 'http://info.mzalendo.com' + person.get_absolute_url(),
'AspirantPosition': race_type}
place = position.place
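                        # Fill the place columns by walking up the hierarchy: ward -> constituency -> county.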
if place.kind.slug == 'ward':
row['Ward'] = place.name
row['Constituency'] = place.parent_place.name
row['County'] = place.parent_place.parent_place.name
elif place.kind.slug == 'constituency':
row['Constituency'] = place.name
row['County'] = place.parent_place.name
elif place.kind.slug == 'county':
row['County'] = place.name
else:
raise Exception, "Unknown place: %s" % (place)
row['Parties'] = ", ".join(p.name.strip() for p in person.parties())
for key, value in row.items():
row[key] = unicode(value).encode('utf-8')
writer.writerow(row)
|
Add a command to generate a CSV file of candidates for Google AdWords
This is for issue #585 - as I understand it, this wouldn't
be used directly but is helpful for creating AdWords en masse.import csv
import os
import sys
from optparse import make_option
from core.models import Position
from django.core.management.base import NoArgsCommand
data_directory = os.path.join(sys.path[0], 'kenya', '2013-election-data')
class Command(NoArgsCommand):
    help = 'Generate a CSV file with all candidates for generating Google AdWords'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
)
def handle_noargs(self, **options):
headings = ['FullName', 'MzURL', 'AspirantPosition', 'County', 'Constituency', 'Ward', 'Parties', 'Keywords']
with open('aspirants-county-and-smaller.csv', 'w') as fp:
writer = csv.DictWriter(fp, fieldnames=headings)
writer.writerow(dict((h, h) for h in headings))
for race_type in ('governor',
'senator',
'women-representative',
'mp',
'ward-representative'):
for position in Position.objects.filter(title__slug=('aspirant-' + race_type)).currently_active():
person = position.person
full_names = [person.legal_name]
full_names += [an.alternative_name for an in person.alternative_names.all()]
for full_name in set(full_names):
row = {'FullName': full_name,
'MzURL': 'http://info.mzalendo.com' + person.get_absolute_url(),
'AspirantPosition': race_type}
place = position.place
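                        # Fill the place columns by walking up the hierarchy: ward -> constituency -> county.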
if place.kind.slug == 'ward':
row['Ward'] = place.name
row['Constituency'] = place.parent_place.name
row['County'] = place.parent_place.parent_place.name
elif place.kind.slug == 'constituency':
row['Constituency'] = place.name
row['County'] = place.parent_place.name
elif place.kind.slug == 'county':
row['County'] = place.name
else:
raise Exception, "Unknown place: %s" % (place)
row['Parties'] = ", ".join(p.name.strip() for p in person.parties())
for key, value in row.items():
row[key] = unicode(value).encode('utf-8')
writer.writerow(row)
|
<commit_before><commit_msg>Add a command to generate a CSV file of candidates for Google AdWords
This is for issue #585 - as I understand it, this wouldn't
be used directly but is helpful for creating AdWords en masse.<commit_after>import csv
import os
import sys
from optparse import make_option
from core.models import Position
from django.core.management.base import NoArgsCommand
data_directory = os.path.join(sys.path[0], 'kenya', '2013-election-data')
class Command(NoArgsCommand):
    help = 'Generate a CSV file with all candidates for generating Google AdWords'
option_list = NoArgsCommand.option_list + (
make_option('--commit', action='store_true', dest='commit', help='Actually update the database'),
)
def handle_noargs(self, **options):
headings = ['FullName', 'MzURL', 'AspirantPosition', 'County', 'Constituency', 'Ward', 'Parties', 'Keywords']
with open('aspirants-county-and-smaller.csv', 'w') as fp:
writer = csv.DictWriter(fp, fieldnames=headings)
writer.writerow(dict((h, h) for h in headings))
for race_type in ('governor',
'senator',
'women-representative',
'mp',
'ward-representative'):
for position in Position.objects.filter(title__slug=('aspirant-' + race_type)).currently_active():
person = position.person
full_names = [person.legal_name]
full_names += [an.alternative_name for an in person.alternative_names.all()]
for full_name in set(full_names):
row = {'FullName': full_name,
'MzURL': 'http://info.mzalendo.com' + person.get_absolute_url(),
'AspirantPosition': race_type}
place = position.place
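                        # Fill the place columns by walking up the hierarchy: ward -> constituency -> county.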
if place.kind.slug == 'ward':
row['Ward'] = place.name
row['Constituency'] = place.parent_place.name
row['County'] = place.parent_place.parent_place.name
elif place.kind.slug == 'constituency':
row['Constituency'] = place.name
row['County'] = place.parent_place.name
elif place.kind.slug == 'county':
row['County'] = place.name
else:
raise Exception, "Unknown place: %s" % (place)
row['Parties'] = ", ".join(p.name.strip() for p in person.parties())
for key, value in row.items():
row[key] = unicode(value).encode('utf-8')
writer.writerow(row)
|
|
0fd68b4ac82bf867365c9cc5e0a129dbb51d8247
|
teamstats/migrations/0002_auto_20180828_1937.py
|
teamstats/migrations/0002_auto_20180828_1937.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-08-28 16:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('teamstats', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='video',
name='mp4',
field=models.FilePathField(blank=True, match='.*\\.mp4$', null=True, path='videos', recursive=True),
),
migrations.AlterField(
model_name='video',
name='ogg',
field=models.FilePathField(blank=True, match='.*\\.ogv$', null=True, path='videos', recursive=True),
),
migrations.AlterField(
model_name='video',
name='webm',
field=models.FilePathField(blank=True, match='.*\\.webm$', null=True, path='videos', recursive=True),
),
]
|
Add migrations for media files
|
Add migrations for media files
|
Python
|
agpl-3.0
|
jluttine/django-sportsteam,jluttine/django-sportsteam,jluttine/django-sportsteam,jluttine/django-sportsteam
|
Add migrations for media files
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-08-28 16:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('teamstats', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='video',
name='mp4',
field=models.FilePathField(blank=True, match='.*\\.mp4$', null=True, path='videos', recursive=True),
),
migrations.AlterField(
model_name='video',
name='ogg',
field=models.FilePathField(blank=True, match='.*\\.ogv$', null=True, path='videos', recursive=True),
),
migrations.AlterField(
model_name='video',
name='webm',
field=models.FilePathField(blank=True, match='.*\\.webm$', null=True, path='videos', recursive=True),
),
]
|
<commit_before><commit_msg>Add migrations for media files<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-08-28 16:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('teamstats', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='video',
name='mp4',
field=models.FilePathField(blank=True, match='.*\\.mp4$', null=True, path='videos', recursive=True),
),
migrations.AlterField(
model_name='video',
name='ogg',
field=models.FilePathField(blank=True, match='.*\\.ogv$', null=True, path='videos', recursive=True),
),
migrations.AlterField(
model_name='video',
name='webm',
field=models.FilePathField(blank=True, match='.*\\.webm$', null=True, path='videos', recursive=True),
),
]
|
Add migrations for media files# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-08-28 16:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('teamstats', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='video',
name='mp4',
field=models.FilePathField(blank=True, match='.*\\.mp4$', null=True, path='videos', recursive=True),
),
migrations.AlterField(
model_name='video',
name='ogg',
field=models.FilePathField(blank=True, match='.*\\.ogv$', null=True, path='videos', recursive=True),
),
migrations.AlterField(
model_name='video',
name='webm',
field=models.FilePathField(blank=True, match='.*\\.webm$', null=True, path='videos', recursive=True),
),
]
|
<commit_before><commit_msg>Add migrations for media files<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2018-08-28 16:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('teamstats', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='video',
name='mp4',
field=models.FilePathField(blank=True, match='.*\\.mp4$', null=True, path='videos', recursive=True),
),
migrations.AlterField(
model_name='video',
name='ogg',
field=models.FilePathField(blank=True, match='.*\\.ogv$', null=True, path='videos', recursive=True),
),
migrations.AlterField(
model_name='video',
name='webm',
field=models.FilePathField(blank=True, match='.*\\.webm$', null=True, path='videos', recursive=True),
),
]
|
|
ad1dd9066f04f0dda10a1c991480ba506d53676a
|
benchmarks/benchmarks/bench_array_coercion.py
|
benchmarks/benchmarks/bench_array_coercion.py
|
from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
class ArrayCoercionSmall(Benchmark):
# More detailed benchmarks for array coercion,
# some basic benchmarks are in `bench_core.py`.
params = [[range(3), [1], 1, np.array([5], dtype=np.int64), np.int64(5)]]
param_names = ['array_like']
int64 = np.dtype(np.int64)
def time_array_invalid_kwarg(self, array_like):
try:
np.array(array_like, ndmin="not-integer")
except TypeError:
pass
def time_array(self, array_like):
np.array(array_like)
def time_array_dtype_not_kwargs(self, array_like):
np.array(array_like, self.int64)
def time_array_no_copy(self, array_like):
np.array(array_like, copy=False)
def time_array_subok(self, array_like):
np.array(array_like, subok=True)
def time_array_all_kwargs(self, array_like):
np.array(array_like, dtype=self.int64, copy=False, order="F",
subok=False, ndmin=2)
def time_asarray(self, array_like):
np.asarray(array_like)
def time_asarray_dtype(self, array_like):
np.array(array_like, dtype=self.int64)
def time_asarray_dtype(self, array_like):
np.array(array_like, dtype=self.int64, order="F")
def time_asanyarray(self, array_like):
        np.asanyarray(array_like)
def time_asanyarray_dtype(self, array_like):
np.array(array_like, dtype=self.int64)
def time_asanyarray_dtype(self, array_like):
np.array(array_like, dtype=self.int64, order="F")
def time_ascontiguousarray(self, array_like):
np.ascontiguousarray(array_like)
|
Add benchmark for small array coercions
|
BENCH: Add benchmark for small array coercions
Note that since the benchmarks are so fast, the actual results
are diluted behind one more python function call.
|
Python
|
bsd-3-clause
|
seberg/numpy,anntzer/numpy,endolith/numpy,abalkin/numpy,grlee77/numpy,mattip/numpy,simongibbons/numpy,pbrod/numpy,endolith/numpy,numpy/numpy,mattip/numpy,madphysicist/numpy,jakirkham/numpy,rgommers/numpy,grlee77/numpy,pdebuyl/numpy,anntzer/numpy,rgommers/numpy,jakirkham/numpy,charris/numpy,simongibbons/numpy,grlee77/numpy,madphysicist/numpy,pdebuyl/numpy,numpy/numpy,jakirkham/numpy,mhvk/numpy,mhvk/numpy,numpy/numpy,anntzer/numpy,abalkin/numpy,pbrod/numpy,pdebuyl/numpy,anntzer/numpy,seberg/numpy,jakirkham/numpy,endolith/numpy,madphysicist/numpy,simongibbons/numpy,seberg/numpy,mhvk/numpy,seberg/numpy,numpy/numpy,mattip/numpy,grlee77/numpy,pbrod/numpy,endolith/numpy,pbrod/numpy,charris/numpy,charris/numpy,charris/numpy,mhvk/numpy,mhvk/numpy,jakirkham/numpy,pdebuyl/numpy,pbrod/numpy,WarrenWeckesser/numpy,madphysicist/numpy,madphysicist/numpy,simongibbons/numpy,grlee77/numpy,WarrenWeckesser/numpy,simongibbons/numpy,rgommers/numpy,rgommers/numpy,WarrenWeckesser/numpy,mattip/numpy,WarrenWeckesser/numpy,WarrenWeckesser/numpy,abalkin/numpy
|
BENCH: Add benchmark for small array coercions
Note that since the benchmarks are so fast, the actual results
are diluted behind one more python function call.
|
from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
class ArrayCoercionSmall(Benchmark):
# More detailed benchmarks for array coercion,
# some basic benchmarks are in `bench_core.py`.
params = [[range(3), [1], 1, np.array([5], dtype=np.int64), np.int64(5)]]
param_names = ['array_like']
int64 = np.dtype(np.int64)
def time_array_invalid_kwarg(self, array_like):
try:
np.array(array_like, ndmin="not-integer")
except TypeError:
pass
def time_array(self, array_like):
np.array(array_like)
def time_array_dtype_not_kwargs(self, array_like):
np.array(array_like, self.int64)
def time_array_no_copy(self, array_like):
np.array(array_like, copy=False)
def time_array_subok(self, array_like):
np.array(array_like, subok=True)
def time_array_all_kwargs(self, array_like):
np.array(array_like, dtype=self.int64, copy=False, order="F",
subok=False, ndmin=2)
def time_asarray(self, array_like):
np.asarray(array_like)
def time_asarray_dtype(self, array_like):
np.array(array_like, dtype=self.int64)
def time_asarray_dtype(self, array_like):
np.array(array_like, dtype=self.int64, order="F")
def time_asanyarray(self, array_like):
        np.asanyarray(array_like)
def time_asanyarray_dtype(self, array_like):
np.array(array_like, dtype=self.int64)
def time_asanyarray_dtype(self, array_like):
np.array(array_like, dtype=self.int64, order="F")
def time_ascontiguousarray(self, array_like):
np.ascontiguousarray(array_like)
|
<commit_before><commit_msg>BENCH: Add benchmark for small array coercions
Note that since the benchmarks are so fast, the actual results
are diluted behind one more python function call.<commit_after>
|
from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
class ArrayCoercionSmall(Benchmark):
# More detailed benchmarks for array coercion,
# some basic benchmarks are in `bench_core.py`.
params = [[range(3), [1], 1, np.array([5], dtype=np.int64), np.int64(5)]]
param_names = ['array_like']
int64 = np.dtype(np.int64)
def time_array_invalid_kwarg(self, array_like):
try:
np.array(array_like, ndmin="not-integer")
except TypeError:
pass
def time_array(self, array_like):
np.array(array_like)
def time_array_dtype_not_kwargs(self, array_like):
np.array(array_like, self.int64)
def time_array_no_copy(self, array_like):
np.array(array_like, copy=False)
def time_array_subok(self, array_like):
np.array(array_like, subok=True)
def time_array_all_kwargs(self, array_like):
np.array(array_like, dtype=self.int64, copy=False, order="F",
subok=False, ndmin=2)
def time_asarray(self, array_like):
np.asarray(array_like)
def time_asarray_dtype(self, array_like):
np.array(array_like, dtype=self.int64)
def time_asarray_dtype(self, array_like):
np.array(array_like, dtype=self.int64, order="F")
def time_asanyarray(self, array_like):
        np.asanyarray(array_like)
def time_asanyarray_dtype(self, array_like):
np.array(array_like, dtype=self.int64)
def time_asanyarray_dtype(self, array_like):
np.array(array_like, dtype=self.int64, order="F")
def time_ascontiguousarray(self, array_like):
np.ascontiguousarray(array_like)
|
BENCH: Add benchmark for small array coercions
Note that since the benchmarks are so fast, the actual results
are diluted behind one more python function call.from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
class ArrayCoercionSmall(Benchmark):
# More detailed benchmarks for array coercion,
# some basic benchmarks are in `bench_core.py`.
params = [[range(3), [1], 1, np.array([5], dtype=np.int64), np.int64(5)]]
param_names = ['array_like']
int64 = np.dtype(np.int64)
def time_array_invalid_kwarg(self, array_like):
try:
np.array(array_like, ndmin="not-integer")
except TypeError:
pass
def time_array(self, array_like):
np.array(array_like)
def time_array_dtype_not_kwargs(self, array_like):
np.array(array_like, self.int64)
def time_array_no_copy(self, array_like):
np.array(array_like, copy=False)
def time_array_subok(self, array_like):
np.array(array_like, subok=True)
def time_array_all_kwargs(self, array_like):
np.array(array_like, dtype=self.int64, copy=False, order="F",
subok=False, ndmin=2)
def time_asarray(self, array_like):
np.asarray(array_like)
def time_asarray_dtype(self, array_like):
np.array(array_like, dtype=self.int64)
def time_asarray_dtype(self, array_like):
np.array(array_like, dtype=self.int64, order="F")
def time_asanyarray(self, array_like):
        np.asanyarray(array_like)
def time_asanyarray_dtype(self, array_like):
np.array(array_like, dtype=self.int64)
def time_asanyarray_dtype(self, array_like):
np.array(array_like, dtype=self.int64, order="F")
def time_ascontiguousarray(self, array_like):
np.ascontiguousarray(array_like)
|
<commit_before><commit_msg>BENCH: Add benchmark for small array coercions
Note that since the benchmarks are so fast, the actual results
are diluted behind one more python function call.<commit_after>from __future__ import absolute_import, division, print_function
from .common import Benchmark
import numpy as np
class ArrayCoercionSmall(Benchmark):
# More detailed benchmarks for array coercion,
# some basic benchmarks are in `bench_core.py`.
params = [[range(3), [1], 1, np.array([5], dtype=np.int64), np.int64(5)]]
param_names = ['array_like']
int64 = np.dtype(np.int64)
def time_array_invalid_kwarg(self, array_like):
try:
np.array(array_like, ndmin="not-integer")
except TypeError:
pass
def time_array(self, array_like):
np.array(array_like)
def time_array_dtype_not_kwargs(self, array_like):
np.array(array_like, self.int64)
def time_array_no_copy(self, array_like):
np.array(array_like, copy=False)
def time_array_subok(self, array_like):
np.array(array_like, subok=True)
def time_array_all_kwargs(self, array_like):
np.array(array_like, dtype=self.int64, copy=False, order="F",
subok=False, ndmin=2)
def time_asarray(self, array_like):
np.asarray(array_like)
def time_asarray_dtype(self, array_like):
np.array(array_like, dtype=self.int64)
def time_asarray_dtype(self, array_like):
np.array(array_like, dtype=self.int64, order="F")
def time_asanyarray(self, array_like):
        np.asanyarray(array_like)
def time_asanyarray_dtype(self, array_like):
np.array(array_like, dtype=self.int64)
def time_asanyarray_dtype(self, array_like):
np.array(array_like, dtype=self.int64, order="F")
def time_ascontiguousarray(self, array_like):
np.ascontiguousarray(array_like)
|
|
640057f8c821fe7bf5e7968f4a7ff5039d0e9317
|
test/unit/ggrc/converters/test_import_helper.py
|
test/unit/ggrc/converters/test_import_helper.py
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
import unittest
from ggrc import app # noqa
from ggrc import converters
from ggrc.converters import import_helper
from ggrc.converters import column_handlers
class TestGetObjectColumnDefinitons(unittest.TestCase):
def test_object_column_handlers(self):
def test_single_object(obj):
handlers = column_handlers.COLUMN_HANDLERS
column_definitions = import_helper.get_object_column_definitions(obj)
for key, value in column_definitions.items():
if key in handlers:
self.assertEqual(
value["handler"],
handlers[key],
"Object '{}', column '{}': expected {}, found {}".format(
obj.__name__,
key,
handlers[key].__name__,
value["handler"].__name__,
)
)
verificationErrors = []
for obj in set(converters.get_exportables().values()):
try:
test_single_object(obj)
except AssertionError as e:
verificationErrors.append(str(e))
verificationErrors.sort()
self.assertEqual(verificationErrors, [])
|
Add tests for correct column handlers
|
Add tests for correct column handlers
|
Python
|
apache-2.0
|
prasannav7/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,selahssea/ggrc-core,NejcZupec/ggrc-core,selahssea/ggrc-core,jmakov/ggrc-core,jmakov/ggrc-core,AleksNeStu/ggrc-core,jmakov/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,kr41/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,NejcZupec/ggrc-core,edofic/ggrc-core,andrei-karalionak/ggrc-core,josthkko/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core,andrei-karalionak/ggrc-core,plamut/ggrc-core,prasannav7/ggrc-core,NejcZupec/ggrc-core,prasannav7/ggrc-core,VinnieJohns/ggrc-core,andrei-karalionak/ggrc-core,josthkko/ggrc-core,selahssea/ggrc-core,kr41/ggrc-core,kr41/ggrc-core,j0gurt/ggrc-core,j0gurt/ggrc-core,jmakov/ggrc-core,jmakov/ggrc-core,prasannav7/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,NejcZupec/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core
|
Add tests for correct column handlers
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
import unittest
from ggrc import app # noqa
from ggrc import converters
from ggrc.converters import import_helper
from ggrc.converters import column_handlers
class TestGetObjectColumnDefinitons(unittest.TestCase):
def test_object_column_handlers(self):
def test_single_object(obj):
handlers = column_handlers.COLUMN_HANDLERS
column_definitions = import_helper.get_object_column_definitions(obj)
for key, value in column_definitions.items():
if key in handlers:
self.assertEqual(
value["handler"],
handlers[key],
"Object '{}', column '{}': expected {}, found {}".format(
obj.__name__,
key,
handlers[key].__name__,
value["handler"].__name__,
)
)
verificationErrors = []
for obj in set(converters.get_exportables().values()):
try:
test_single_object(obj)
except AssertionError as e:
verificationErrors.append(str(e))
verificationErrors.sort()
self.assertEqual(verificationErrors, [])
|
<commit_before><commit_msg>Add tests for correct column handlers<commit_after>
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
import unittest
from ggrc import app # noqa
from ggrc import converters
from ggrc.converters import import_helper
from ggrc.converters import column_handlers
class TestGetObjectColumnDefinitons(unittest.TestCase):
def test_object_column_handlers(self):
def test_single_object(obj):
handlers = column_handlers.COLUMN_HANDLERS
column_definitions = import_helper.get_object_column_definitions(obj)
for key, value in column_definitions.items():
if key in handlers:
self.assertEqual(
value["handler"],
handlers[key],
"Object '{}', column '{}': expected {}, found {}".format(
obj.__name__,
key,
handlers[key].__name__,
value["handler"].__name__,
)
)
verificationErrors = []
for obj in set(converters.get_exportables().values()):
try:
test_single_object(obj)
except AssertionError as e:
verificationErrors.append(str(e))
verificationErrors.sort()
self.assertEqual(verificationErrors, [])
|
Add tests for correct column handlers# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
import unittest
from ggrc import app # noqa
from ggrc import converters
from ggrc.converters import import_helper
from ggrc.converters import column_handlers
class TestGetObjectColumnDefinitons(unittest.TestCase):
def test_object_column_handlers(self):
def test_single_object(obj):
handlers = column_handlers.COLUMN_HANDLERS
column_definitions = import_helper.get_object_column_definitions(obj)
for key, value in column_definitions.items():
if key in handlers:
self.assertEqual(
value["handler"],
handlers[key],
"Object '{}', column '{}': expected {}, found {}".format(
obj.__name__,
key,
handlers[key].__name__,
value["handler"].__name__,
)
)
verificationErrors = []
for obj in set(converters.get_exportables().values()):
try:
test_single_object(obj)
except AssertionError as e:
verificationErrors.append(str(e))
verificationErrors.sort()
self.assertEqual(verificationErrors, [])
|
<commit_before><commit_msg>Add tests for correct column handlers<commit_after># Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
import unittest
from ggrc import app # noqa
from ggrc import converters
from ggrc.converters import import_helper
from ggrc.converters import column_handlers
class TestGetObjectColumnDefinitons(unittest.TestCase):
def test_object_column_handlers(self):
def test_single_object(obj):
handlers = column_handlers.COLUMN_HANDLERS
column_definitions = import_helper.get_object_column_definitions(obj)
for key, value in column_definitions.items():
if key in handlers:
self.assertEqual(
value["handler"],
handlers[key],
"Object '{}', column '{}': expected {}, found {}".format(
obj.__name__,
key,
handlers[key].__name__,
value["handler"].__name__,
)
)
verificationErrors = []
for obj in set(converters.get_exportables().values()):
try:
test_single_object(obj)
except AssertionError as e:
verificationErrors.append(str(e))
verificationErrors.sort()
self.assertEqual(verificationErrors, [])
|
|
c52bcf13f4759554eebaef8c6a573217226655ef
|
tests/test_injectorplugin.py
|
tests/test_injectorplugin.py
|
import pytest
from fanstatic import Library, Resource, NeededResources
from fanstatic.injector import InjectorPlugin
from fanstatic.registry import InjectorRegistry
from fanstatic import make_injector
from fanstatic import ConfigurationError
from fanstatic.injector import TopBottomInjector
class TopInjector(InjectorPlugin):
name = 'top'
def __call__(self, html, needed):
needed_html = self.make_inclusion(needed).render()
return html.replace('<head>', '<head>%s' % needed_html, 1)
def test_injector_based_on_injectorplugin():
foo = Library('foo', '')
a = Resource(foo, 'a.css')
b = Resource(foo, 'b.css', bottom=True)
needed = NeededResources(resources=[a,b])
inj = TopInjector({})
html = b'<html><head></head><body></body></html>'
assert inj(html, needed) == \
'''<html><head><link rel="stylesheet" type="text/css" href="/fanstatic/foo/a.css" />
<link rel="stylesheet" type="text/css" href="/fanstatic/foo/b.css" /></head><body></body></html>'''
class TestingRegistry(object):
def __init__(self, request):
self.request = request
def add_injector(self, injector):
return self._register_injector(InjectorRegistry, injector)
def _register_injector(self, registry, injector):
self.request.addfinalizer(
lambda: registry.instance().pop(injector.name))
registry.instance().add(injector)
return injector
@pytest.fixture
def injectors(request):
return TestingRegistry(request)
def test_injector_plugin_registered_by_name(injectors):
with pytest.raises(KeyError):
InjectorRegistry.instance()['top']
injectors.add_injector(TopInjector)
# After registering, no longer raise a key error.
InjectorRegistry.instance()['top']
def test_wsgi_middleware_lookup_injector():
injector_middleware = make_injector(None, {})
# Default is the topbottom injector
assert isinstance(injector_middleware.injector, TopBottomInjector)
with pytest.raises(ConfigurationError):
make_injector(None, {}, injector='foo')
def test_wsgi_middleware_lookup_injector_register(injectors):
with pytest.raises(ConfigurationError):
make_injector(None, {}, injector='top')
injectors.add_injector(TopInjector)
# After registering, no longer raise a Configuration Error.
make_injector(None, {}, injector='top')
|
Add some tests for the injector registry
|
Add some tests for the injector registry
|
Python
|
bsd-3-clause
|
fanstatic/fanstatic,fanstatic/fanstatic
|
Add some tests for the injector registry
|
import pytest
from fanstatic import Library, Resource, NeededResources
from fanstatic.injector import InjectorPlugin
from fanstatic.registry import InjectorRegistry
from fanstatic import make_injector
from fanstatic import ConfigurationError
from fanstatic.injector import TopBottomInjector
class TopInjector(InjectorPlugin):
name = 'top'
def __call__(self, html, needed):
needed_html = self.make_inclusion(needed).render()
return html.replace('<head>', '<head>%s' % needed_html, 1)
def test_injector_based_on_injectorplugin():
foo = Library('foo', '')
a = Resource(foo, 'a.css')
b = Resource(foo, 'b.css', bottom=True)
needed = NeededResources(resources=[a,b])
inj = TopInjector({})
html = b'<html><head></head><body></body></html>'
assert inj(html, needed) == \
'''<html><head><link rel="stylesheet" type="text/css" href="/fanstatic/foo/a.css" />
<link rel="stylesheet" type="text/css" href="/fanstatic/foo/b.css" /></head><body></body></html>'''
class TestingRegistry(object):
def __init__(self, request):
self.request = request
def add_injector(self, injector):
return self._register_injector(InjectorRegistry, injector)
def _register_injector(self, registry, injector):
self.request.addfinalizer(
lambda: registry.instance().pop(injector.name))
registry.instance().add(injector)
return injector
@pytest.fixture
def injectors(request):
return TestingRegistry(request)
def test_injector_plugin_registered_by_name(injectors):
with pytest.raises(KeyError):
InjectorRegistry.instance()['top']
injectors.add_injector(TopInjector)
# After registering, no longer raise a key error.
InjectorRegistry.instance()['top']
def test_wsgi_middleware_lookup_injector():
injector_middleware = make_injector(None, {})
# Default is the topbottom injector
assert isinstance(injector_middleware.injector, TopBottomInjector)
with pytest.raises(ConfigurationError):
make_injector(None, {}, injector='foo')
def test_wsgi_middleware_lookup_injector_register(injectors):
with pytest.raises(ConfigurationError):
make_injector(None, {}, injector='top')
injectors.add_injector(TopInjector)
# After registering, no longer raise a Configuration Error.
make_injector(None, {}, injector='top')
|
<commit_before><commit_msg>Add some tests for the injector registry<commit_after>
|
import pytest
from fanstatic import Library, Resource, NeededResources
from fanstatic.injector import InjectorPlugin
from fanstatic.registry import InjectorRegistry
from fanstatic import make_injector
from fanstatic import ConfigurationError
from fanstatic.injector import TopBottomInjector
class TopInjector(InjectorPlugin):
name = 'top'
def __call__(self, html, needed):
needed_html = self.make_inclusion(needed).render()
return html.replace('<head>', '<head>%s' % needed_html, 1)
def test_injector_based_on_injectorplugin():
foo = Library('foo', '')
a = Resource(foo, 'a.css')
b = Resource(foo, 'b.css', bottom=True)
needed = NeededResources(resources=[a,b])
inj = TopInjector({})
html = b'<html><head></head><body></body></html>'
assert inj(html, needed) == \
'''<html><head><link rel="stylesheet" type="text/css" href="/fanstatic/foo/a.css" />
<link rel="stylesheet" type="text/css" href="/fanstatic/foo/b.css" /></head><body></body></html>'''
class TestingRegistry(object):
def __init__(self, request):
self.request = request
def add_injector(self, injector):
return self._register_injector(InjectorRegistry, injector)
def _register_injector(self, registry, injector):
self.request.addfinalizer(
lambda: registry.instance().pop(injector.name))
registry.instance().add(injector)
return injector
@pytest.fixture
def injectors(request):
return TestingRegistry(request)
def test_injector_plugin_registered_by_name(injectors):
with pytest.raises(KeyError):
InjectorRegistry.instance()['top']
injectors.add_injector(TopInjector)
# After registering, no longer raise a key error.
InjectorRegistry.instance()['top']
def test_wsgi_middleware_lookup_injector():
injector_middleware = make_injector(None, {})
# Default is the topbottom injector
assert isinstance(injector_middleware.injector, TopBottomInjector)
with pytest.raises(ConfigurationError):
make_injector(None, {}, injector='foo')
def test_wsgi_middleware_lookup_injector_register(injectors):
with pytest.raises(ConfigurationError):
make_injector(None, {}, injector='top')
injectors.add_injector(TopInjector)
# After registering, no longer raise a Configuration Error.
make_injector(None, {}, injector='top')
|
Add some tests for the injector registryimport pytest
from fanstatic import Library, Resource, NeededResources
from fanstatic.injector import InjectorPlugin
from fanstatic.registry import InjectorRegistry
from fanstatic import make_injector
from fanstatic import ConfigurationError
from fanstatic.injector import TopBottomInjector
class TopInjector(InjectorPlugin):
name = 'top'
def __call__(self, html, needed):
needed_html = self.make_inclusion(needed).render()
return html.replace('<head>', '<head>%s' % needed_html, 1)
def test_injector_based_on_injectorplugin():
foo = Library('foo', '')
a = Resource(foo, 'a.css')
b = Resource(foo, 'b.css', bottom=True)
needed = NeededResources(resources=[a,b])
inj = TopInjector({})
html = b'<html><head></head><body></body></html>'
assert inj(html, needed) == \
'''<html><head><link rel="stylesheet" type="text/css" href="/fanstatic/foo/a.css" />
<link rel="stylesheet" type="text/css" href="/fanstatic/foo/b.css" /></head><body></body></html>'''
class TestingRegistry(object):
def __init__(self, request):
self.request = request
def add_injector(self, injector):
return self._register_injector(InjectorRegistry, injector)
def _register_injector(self, registry, injector):
self.request.addfinalizer(
lambda: registry.instance().pop(injector.name))
registry.instance().add(injector)
return injector
@pytest.fixture
def injectors(request):
return TestingRegistry(request)
def test_injector_plugin_registered_by_name(injectors):
with pytest.raises(KeyError):
InjectorRegistry.instance()['top']
injectors.add_injector(TopInjector)
# After registering, no longer raise a key error.
InjectorRegistry.instance()['top']
def test_wsgi_middleware_lookup_injector():
injector_middleware = make_injector(None, {})
# Default is the topbottom injector
assert isinstance(injector_middleware.injector, TopBottomInjector)
with pytest.raises(ConfigurationError):
make_injector(None, {}, injector='foo')
def test_wsgi_middleware_lookup_injector_register(injectors):
with pytest.raises(ConfigurationError):
make_injector(None, {}, injector='top')
injectors.add_injector(TopInjector)
# After registering, no longer raise a Configuration Error.
make_injector(None, {}, injector='top')
|
<commit_before><commit_msg>Add some tests for the injector registry<commit_after>import pytest
from fanstatic import Library, Resource, NeededResources
from fanstatic.injector import InjectorPlugin
from fanstatic.registry import InjectorRegistry
from fanstatic import make_injector
from fanstatic import ConfigurationError
from fanstatic.injector import TopBottomInjector
class TopInjector(InjectorPlugin):
name = 'top'
def __call__(self, html, needed):
needed_html = self.make_inclusion(needed).render()
return html.replace('<head>', '<head>%s' % needed_html, 1)
def test_injector_based_on_injectorplugin():
foo = Library('foo', '')
a = Resource(foo, 'a.css')
b = Resource(foo, 'b.css', bottom=True)
needed = NeededResources(resources=[a,b])
inj = TopInjector({})
html = b'<html><head></head><body></body></html>'
assert inj(html, needed) == \
'''<html><head><link rel="stylesheet" type="text/css" href="/fanstatic/foo/a.css" />
<link rel="stylesheet" type="text/css" href="/fanstatic/foo/b.css" /></head><body></body></html>'''
class TestingRegistry(object):
def __init__(self, request):
self.request = request
def add_injector(self, injector):
return self._register_injector(InjectorRegistry, injector)
def _register_injector(self, registry, injector):
self.request.addfinalizer(
lambda: registry.instance().pop(injector.name))
registry.instance().add(injector)
return injector
@pytest.fixture
def injectors(request):
return TestingRegistry(request)
def test_injector_plugin_registered_by_name(injectors):
with pytest.raises(KeyError):
InjectorRegistry.instance()['top']
injectors.add_injector(TopInjector)
# After registering, no longer raise a key error.
InjectorRegistry.instance()['top']
def test_wsgi_middleware_lookup_injector():
injector_middleware = make_injector(None, {})
# Default is the topbottom injector
assert isinstance(injector_middleware.injector, TopBottomInjector)
with pytest.raises(ConfigurationError):
make_injector(None, {}, injector='foo')
def test_wsgi_middleware_lookup_injector_register(injectors):
with pytest.raises(ConfigurationError):
make_injector(None, {}, injector='top')
injectors.add_injector(TopInjector)
# After registering, no longer raise a Configuration Error.
make_injector(None, {}, injector='top')
|
|
cef9eb64e9d1cbb87ef1328997334266f599c936
|
pylearn2/scripts/tests/test_train.py
|
pylearn2/scripts/tests/test_train.py
|
"""
A unit test for the train.py script
"""
import os
import pylearn2
from pylearn2.scripts.train import train
def test_train_cmd():
"""
Calls the train.py script with a short YAML file
to see if it trains without error
"""
train(os.path.join(pylearn2.__path__[0],
"scripts/autoencoder_example/dae.yaml"))
|
Add unit test for train.py
|
Add unit test for train.py
|
Python
|
bsd-3-clause
|
junbochen/pylearn2,kose-y/pylearn2,ddboline/pylearn2,fyffyt/pylearn2,lisa-lab/pylearn2,se4u/pylearn2,w1kke/pylearn2,pombredanne/pylearn2,lamblin/pylearn2,jamessergeant/pylearn2,junbochen/pylearn2,daemonmaker/pylearn2,jeremyfix/pylearn2,jeremyfix/pylearn2,hantek/pylearn2,bartvm/pylearn2,fyffyt/pylearn2,hantek/pylearn2,alexjc/pylearn2,Refefer/pylearn2,CIFASIS/pylearn2,chrish42/pylearn,ashhher3/pylearn2,fishcorn/pylearn2,fyffyt/pylearn2,se4u/pylearn2,msingh172/pylearn2,lunyang/pylearn2,Refefer/pylearn2,lunyang/pylearn2,hyqneuron/pylearn2-maxsom,fishcorn/pylearn2,mclaughlin6464/pylearn2,jamessergeant/pylearn2,hyqneuron/pylearn2-maxsom,JesseLivezey/pylearn2,JesseLivezey/plankton,TNick/pylearn2,kastnerkyle/pylearn2,pkainz/pylearn2,matrogers/pylearn2,daemonmaker/pylearn2,woozzu/pylearn2,mclaughlin6464/pylearn2,hyqneuron/pylearn2-maxsom,chrish42/pylearn,pkainz/pylearn2,alexjc/pylearn2,mkraemer67/pylearn2,chrish42/pylearn,JesseLivezey/pylearn2,w1kke/pylearn2,ashhher3/pylearn2,ddboline/pylearn2,fulmicoton/pylearn2,woozzu/pylearn2,lancezlin/pylearn2,pkainz/pylearn2,mclaughlin6464/pylearn2,lancezlin/pylearn2,jamessergeant/pylearn2,abergeron/pylearn2,JesseLivezey/pylearn2,TNick/pylearn2,daemonmaker/pylearn2,ddboline/pylearn2,hantek/pylearn2,skearnes/pylearn2,abergeron/pylearn2,KennethPierce/pylearnk,fulmicoton/pylearn2,shiquanwang/pylearn2,pombredanne/pylearn2,lamblin/pylearn2,JesseLivezey/pylearn2,mclaughlin6464/pylearn2,ashhher3/pylearn2,nouiz/pylearn2,lisa-lab/pylearn2,fulmicoton/pylearn2,ashhher3/pylearn2,junbochen/pylearn2,lamblin/pylearn2,mkraemer67/pylearn2,goodfeli/pylearn2,cosmoharrigan/pylearn2,theoryno3/pylearn2,jeremyfix/pylearn2,kose-y/pylearn2,caidongyun/pylearn2,skearnes/pylearn2,CIFASIS/pylearn2,jamessergeant/pylearn2,lancezlin/pylearn2,cosmoharrigan/pylearn2,bartvm/pylearn2,CIFASIS/pylearn2,w1kke/pylearn2,KennethPierce/pylearnk,skearnes/pylearn2,Refefer/pylearn2,msingh172/pylearn2,pombredanne/pylearn2,sandeepkbhat/pylearn2,lancezlin/pylearn2,TNick/pylearn2,junbochen/pylearn2,matrogers/pylearn2,fishcorn/pylearn2,hantek/pylearn2,theoryno3/pylearn2,aalmah/pylearn2,fulmicoton/pylearn2,woozzu/pylearn2,se4u/pylearn2,sandeepkbhat/pylearn2,pombredanne/pylearn2,hyqneuron/pylearn2-maxsom,shiquanwang/pylearn2,mkraemer67/pylearn2,se4u/pylearn2,kastnerkyle/pylearn2,woozzu/pylearn2,alexjc/pylearn2,goodfeli/pylearn2,theoryno3/pylearn2,KennethPierce/pylearnk,nouiz/pylearn2,aalmah/pylearn2,cosmoharrigan/pylearn2,kastnerkyle/pylearn2,CIFASIS/pylearn2,lisa-lab/pylearn2,shiquanwang/pylearn2,jeremyfix/pylearn2,sandeepkbhat/pylearn2,nouiz/pylearn2,lisa-lab/pylearn2,bartvm/pylearn2,sandeepkbhat/pylearn2,pkainz/pylearn2,caidongyun/pylearn2,daemonmaker/pylearn2,nouiz/pylearn2,JesseLivezey/plankton,mkraemer67/pylearn2,ddboline/pylearn2,lunyang/pylearn2,caidongyun/pylearn2,JesseLivezey/plankton,Refefer/pylearn2,lamblin/pylearn2,msingh172/pylearn2,fyffyt/pylearn2,bartvm/pylearn2,msingh172/pylearn2,matrogers/pylearn2,TNick/pylearn2,caidongyun/pylearn2,lunyang/pylearn2,cosmoharrigan/pylearn2,aalmah/pylearn2,chrish42/pylearn,kose-y/pylearn2,kose-y/pylearn2,w1kke/pylearn2,goodfeli/pylearn2,shiquanwang/pylearn2,matrogers/pylearn2,abergeron/pylearn2,JesseLivezey/plankton,abergeron/pylearn2,aalmah/pylearn2,skearnes/pylearn2,theoryno3/pylearn2,KennethPierce/pylearnk,goodfeli/pylearn2,kastnerkyle/pylearn2,alexjc/pylearn2,fishcorn/pylearn2
|
Add unit test for train.py
|
"""
A unit test for the train.py script
"""
import os
import pylearn2
from pylearn2.scripts.train import train
def test_train_cmd():
"""
Calls the train.py script with a short YAML file
to see if it trains without error
"""
train(os.path.join(pylearn2.__path__[0],
"scripts/autoencoder_example/dae.yaml"))
|
<commit_before><commit_msg>Add unit test for train.py<commit_after>
|
"""
A unit test for the train.py script
"""
import os
import pylearn2
from pylearn2.scripts.train import train
def test_train_cmd():
"""
Calls the train.py script with a short YAML file
to see if it trains without error
"""
train(os.path.join(pylearn2.__path__[0],
"scripts/autoencoder_example/dae.yaml"))
|
Add unit test for train.py"""
A unit test for the train.py script
"""
import os
import pylearn2
from pylearn2.scripts.train import train
def test_train_cmd():
"""
Calls the train.py script with a short YAML file
to see if it trains without error
"""
train(os.path.join(pylearn2.__path__[0],
"scripts/autoencoder_example/dae.yaml"))
|
<commit_before><commit_msg>Add unit test for train.py<commit_after>"""
A unit test for the train.py script
"""
import os
import pylearn2
from pylearn2.scripts.train import train
def test_train_cmd():
"""
Calls the train.py script with a short YAML file
to see if it trains without error
"""
train(os.path.join(pylearn2.__path__[0],
"scripts/autoencoder_example/dae.yaml"))
|
|
836b483e06f1221f35d543e534ddfab6020b06ed
|
bounce.py
|
bounce.py
|
import curses
import random
def bounce(stdscr):
curses.curs_set(0)
height, width = stdscr.getmaxyx()
ypos, xpos = random.randrange(0, height - 1), random.randrange(0, width - 1)
ydir, xdir = 1, 1
stdscr.addstr(height//2, width//2, "Type a character")
stdscr.nodelay(1)
c = None
while True:
c = get_chr(stdscr) or c
if not c:
continue
stdscr.clear()
if ypos in {0, height - 1}:
ydir *= -1
if xpos in {0, width - 1}:
xdir *= -1
ypos += ydir
xpos += xdir
stdscr.addstr(ypos, xpos, c)
curses.delay_output(50)
stdscr.refresh()
def get_chr(stdscr):
try:
c = stdscr.getkey()
except curses.error:
return None
if c == 'q':
exit(0)
return c
curses.wrapper(bounce)
|
Add a bouncing character example
|
Add a bouncing character example
|
Python
|
mit
|
asmeurer/curses_tests
|
Add a bouncing character example
|
import curses
import random
def bounce(stdscr):
curses.curs_set(0)
height, width = stdscr.getmaxyx()
ypos, xpos = random.randrange(0, height - 1), random.randrange(0, width - 1)
ydir, xdir = 1, 1
stdscr.addstr(height//2, width//2, "Type a character")
stdscr.nodelay(1)
c = None
while True:
c = get_chr(stdscr) or c
if not c:
continue
stdscr.clear()
if ypos in {0, height - 1}:
ydir *= -1
if xpos in {0, width - 1}:
xdir *= -1
ypos += ydir
xpos += xdir
stdscr.addstr(ypos, xpos, c)
curses.delay_output(50)
stdscr.refresh()
def get_chr(stdscr):
try:
c = stdscr.getkey()
except curses.error:
return None
if c == 'q':
exit(0)
return c
curses.wrapper(bounce)
|
<commit_before><commit_msg>Add a bouncing character example<commit_after>
|
import curses
import random
def bounce(stdscr):
curses.curs_set(0)
height, width = stdscr.getmaxyx()
ypos, xpos = random.randrange(0, height - 1), random.randrange(0, width - 1)
ydir, xdir = 1, 1
stdscr.addstr(height//2, width//2, "Type a character")
stdscr.nodelay(1)
c = None
while True:
c = get_chr(stdscr) or c
if not c:
continue
stdscr.clear()
if ypos in {0, height - 1}:
ydir *= -1
if xpos in {0, width - 1}:
xdir *= -1
ypos += ydir
xpos += xdir
stdscr.addstr(ypos, xpos, c)
curses.delay_output(50)
stdscr.refresh()
def get_chr(stdscr):
try:
c = stdscr.getkey()
except curses.error:
return None
if c == 'q':
exit(0)
return c
curses.wrapper(bounce)
|
Add a bouncing character exampleimport curses
import random
def bounce(stdscr):
curses.curs_set(0)
height, width = stdscr.getmaxyx()
ypos, xpos = random.randrange(0, height - 1), random.randrange(0, width - 1)
ydir, xdir = 1, 1
stdscr.addstr(height//2, width//2, "Type a character")
stdscr.nodelay(1)
c = None
while True:
c = get_chr(stdscr) or c
if not c:
continue
stdscr.clear()
if ypos in {0, height - 1}:
ydir *= -1
if xpos in {0, width - 1}:
xdir *= -1
ypos += ydir
xpos += xdir
stdscr.addstr(ypos, xpos, c)
curses.delay_output(50)
stdscr.refresh()
def get_chr(stdscr):
try:
c = stdscr.getkey()
except curses.error:
return None
if c == 'q':
exit(0)
return c
curses.wrapper(bounce)
|
<commit_before><commit_msg>Add a bouncing character example<commit_after>import curses
import random
def bounce(stdscr):
curses.curs_set(0)
height, width = stdscr.getmaxyx()
ypos, xpos = random.randrange(0, height - 1), random.randrange(0, width - 1)
ydir, xdir = 1, 1
stdscr.addstr(height//2, width//2, "Type a character")
stdscr.nodelay(1)
c = None
while True:
c = get_chr(stdscr) or c
if not c:
continue
stdscr.clear()
if ypos in {0, height - 1}:
ydir *= -1
if xpos in {0, width - 1}:
xdir *= -1
ypos += ydir
xpos += xdir
stdscr.addstr(ypos, xpos, c)
curses.delay_output(50)
stdscr.refresh()
def get_chr(stdscr):
try:
c = stdscr.getkey()
except curses.error:
return None
if c == 'q':
exit(0)
return c
curses.wrapper(bounce)
|