commit stringlengths 40 40 | old_file stringlengths 4 118 | new_file stringlengths 4 118 | old_contents stringlengths 0 2.94k | new_contents stringlengths 1 4.43k | subject stringlengths 15 444 | message stringlengths 16 3.45k | lang stringclasses 1 value | license stringclasses 13 values | repos stringlengths 5 43.2k | prompt stringlengths 17 4.58k | response stringlengths 1 4.43k | prompt_tagged stringlengths 58 4.62k | response_tagged stringlengths 1 4.43k | text stringlengths 132 7.29k | text_tagged stringlengths 173 7.33k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
04fc49427a1288b259610c98d7cd82b21e1a79a4 | artists/views.py | artists/views.py | from django.shortcuts import get_object_or_404
from rest_framework import permissions, viewsets
from similarities.utils import get_similar
from .models import Artist
from similarities.models import UserSimilarity
from .serializers import ArtistSerializer, SimilaritySerializer
class ArtistViewSet(viewsets.ModelViewSet):
"""API endpoint that allows artists to be viewed or edited"""
queryset = Artist.objects.all()
serializer_class = ArtistSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
def get_queryset(self):
name = self.request.GET.get('name', "")
if name:
qs = get_similar(name)
else:
qs = super().get_queryset()
return qs[:100]
class SimilarViewSet(viewsets.ModelViewSet):
queryset = UserSimilarity.objects.all()
serializer_class = SimilaritySerializer
permission_classes = (permissions.IsAuthenticated,)
http_method_names = ['get', 'post', 'delete']
filter_fields = ['cc_artist']
def get_queryset(self):
return super().get_queryset().filter(user=self.request.user)
def pre_save(self, obj):
obj.user = self.request.user
| from django.shortcuts import get_object_or_404
from rest_framework import permissions, viewsets
from similarities.utils import get_similar
from .models import Artist
from similarities.models import UserSimilarity
from .serializers import ArtistSerializer, SimilaritySerializer
class ArtistViewSet(viewsets.ModelViewSet):
"""API endpoint that allows artists to be viewed or edited"""
queryset = Artist.objects.all()
serializer_class = ArtistSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
def get_queryset(self):
name = self.request.GET.get('name', "")
if name:
qs = get_similar(name)
else:
qs = super().get_queryset()
return qs[:100]
class SimilarViewSet(viewsets.ModelViewSet):
queryset = UserSimilarity.objects.all()
serializer_class = SimilaritySerializer
permission_classes = (permissions.IsAuthenticated,)
http_method_names = ['get', 'post', 'put', 'delete']
filter_fields = ['cc_artist']
def get_queryset(self):
return super().get_queryset().filter(user=self.request.user)
def pre_save(self, obj):
obj.user = self.request.user
| Allow PUT request to /similarities/:id | Allow PUT request to /similarities/:id
| Python | bsd-3-clause | FreeMusicNinja/api.freemusic.ninja | from django.shortcuts import get_object_or_404
from rest_framework import permissions, viewsets
from similarities.utils import get_similar
from .models import Artist
from similarities.models import UserSimilarity
from .serializers import ArtistSerializer, SimilaritySerializer
class ArtistViewSet(viewsets.ModelViewSet):
"""API endpoint that allows artists to be viewed or edited"""
queryset = Artist.objects.all()
serializer_class = ArtistSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
def get_queryset(self):
name = self.request.GET.get('name', "")
if name:
qs = get_similar(name)
else:
qs = super().get_queryset()
return qs[:100]
class SimilarViewSet(viewsets.ModelViewSet):
queryset = UserSimilarity.objects.all()
serializer_class = SimilaritySerializer
permission_classes = (permissions.IsAuthenticated,)
http_method_names = ['get', 'post', 'delete']
filter_fields = ['cc_artist']
def get_queryset(self):
return super().get_queryset().filter(user=self.request.user)
def pre_save(self, obj):
obj.user = self.request.user
Allow PUT request to /similarities/:id | from django.shortcuts import get_object_or_404
from rest_framework import permissions, viewsets
from similarities.utils import get_similar
from .models import Artist
from similarities.models import UserSimilarity
from .serializers import ArtistSerializer, SimilaritySerializer
class ArtistViewSet(viewsets.ModelViewSet):
"""API endpoint that allows artists to be viewed or edited"""
queryset = Artist.objects.all()
serializer_class = ArtistSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
def get_queryset(self):
name = self.request.GET.get('name', "")
if name:
qs = get_similar(name)
else:
qs = super().get_queryset()
return qs[:100]
class SimilarViewSet(viewsets.ModelViewSet):
queryset = UserSimilarity.objects.all()
serializer_class = SimilaritySerializer
permission_classes = (permissions.IsAuthenticated,)
http_method_names = ['get', 'post', 'put', 'delete']
filter_fields = ['cc_artist']
def get_queryset(self):
return super().get_queryset().filter(user=self.request.user)
def pre_save(self, obj):
obj.user = self.request.user
| <commit_before>from django.shortcuts import get_object_or_404
from rest_framework import permissions, viewsets
from similarities.utils import get_similar
from .models import Artist
from similarities.models import UserSimilarity
from .serializers import ArtistSerializer, SimilaritySerializer
class ArtistViewSet(viewsets.ModelViewSet):
"""API endpoint that allows artists to be viewed or edited"""
queryset = Artist.objects.all()
serializer_class = ArtistSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
def get_queryset(self):
name = self.request.GET.get('name', "")
if name:
qs = get_similar(name)
else:
qs = super().get_queryset()
return qs[:100]
class SimilarViewSet(viewsets.ModelViewSet):
queryset = UserSimilarity.objects.all()
serializer_class = SimilaritySerializer
permission_classes = (permissions.IsAuthenticated,)
http_method_names = ['get', 'post', 'delete']
filter_fields = ['cc_artist']
def get_queryset(self):
return super().get_queryset().filter(user=self.request.user)
def pre_save(self, obj):
obj.user = self.request.user
<commit_msg>Allow PUT request to /similarities/:id<commit_after> | from django.shortcuts import get_object_or_404
from rest_framework import permissions, viewsets
from similarities.utils import get_similar
from .models import Artist
from similarities.models import UserSimilarity
from .serializers import ArtistSerializer, SimilaritySerializer
class ArtistViewSet(viewsets.ModelViewSet):
"""API endpoint that allows artists to be viewed or edited"""
queryset = Artist.objects.all()
serializer_class = ArtistSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
def get_queryset(self):
name = self.request.GET.get('name', "")
if name:
qs = get_similar(name)
else:
qs = super().get_queryset()
return qs[:100]
class SimilarViewSet(viewsets.ModelViewSet):
queryset = UserSimilarity.objects.all()
serializer_class = SimilaritySerializer
permission_classes = (permissions.IsAuthenticated,)
http_method_names = ['get', 'post', 'put', 'delete']
filter_fields = ['cc_artist']
def get_queryset(self):
return super().get_queryset().filter(user=self.request.user)
def pre_save(self, obj):
obj.user = self.request.user
| from django.shortcuts import get_object_or_404
from rest_framework import permissions, viewsets
from similarities.utils import get_similar
from .models import Artist
from similarities.models import UserSimilarity
from .serializers import ArtistSerializer, SimilaritySerializer
class ArtistViewSet(viewsets.ModelViewSet):
"""API endpoint that allows artists to be viewed or edited"""
queryset = Artist.objects.all()
serializer_class = ArtistSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
def get_queryset(self):
name = self.request.GET.get('name', "")
if name:
qs = get_similar(name)
else:
qs = super().get_queryset()
return qs[:100]
class SimilarViewSet(viewsets.ModelViewSet):
queryset = UserSimilarity.objects.all()
serializer_class = SimilaritySerializer
permission_classes = (permissions.IsAuthenticated,)
http_method_names = ['get', 'post', 'delete']
filter_fields = ['cc_artist']
def get_queryset(self):
return super().get_queryset().filter(user=self.request.user)
def pre_save(self, obj):
obj.user = self.request.user
Allow PUT request to /similarities/:idfrom django.shortcuts import get_object_or_404
from rest_framework import permissions, viewsets
from similarities.utils import get_similar
from .models import Artist
from similarities.models import UserSimilarity
from .serializers import ArtistSerializer, SimilaritySerializer
class ArtistViewSet(viewsets.ModelViewSet):
"""API endpoint that allows artists to be viewed or edited"""
queryset = Artist.objects.all()
serializer_class = ArtistSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
def get_queryset(self):
name = self.request.GET.get('name', "")
if name:
qs = get_similar(name)
else:
qs = super().get_queryset()
return qs[:100]
class SimilarViewSet(viewsets.ModelViewSet):
queryset = UserSimilarity.objects.all()
serializer_class = SimilaritySerializer
permission_classes = (permissions.IsAuthenticated,)
http_method_names = ['get', 'post', 'put', 'delete']
filter_fields = ['cc_artist']
def get_queryset(self):
return super().get_queryset().filter(user=self.request.user)
def pre_save(self, obj):
obj.user = self.request.user
| <commit_before>from django.shortcuts import get_object_or_404
from rest_framework import permissions, viewsets
from similarities.utils import get_similar
from .models import Artist
from similarities.models import UserSimilarity
from .serializers import ArtistSerializer, SimilaritySerializer
class ArtistViewSet(viewsets.ModelViewSet):
"""API endpoint that allows artists to be viewed or edited"""
queryset = Artist.objects.all()
serializer_class = ArtistSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
def get_queryset(self):
name = self.request.GET.get('name', "")
if name:
qs = get_similar(name)
else:
qs = super().get_queryset()
return qs[:100]
class SimilarViewSet(viewsets.ModelViewSet):
queryset = UserSimilarity.objects.all()
serializer_class = SimilaritySerializer
permission_classes = (permissions.IsAuthenticated,)
http_method_names = ['get', 'post', 'delete']
filter_fields = ['cc_artist']
def get_queryset(self):
return super().get_queryset().filter(user=self.request.user)
def pre_save(self, obj):
obj.user = self.request.user
<commit_msg>Allow PUT request to /similarities/:id<commit_after>from django.shortcuts import get_object_or_404
from rest_framework import permissions, viewsets
from similarities.utils import get_similar
from .models import Artist
from similarities.models import UserSimilarity
from .serializers import ArtistSerializer, SimilaritySerializer
class ArtistViewSet(viewsets.ModelViewSet):
"""API endpoint that allows artists to be viewed or edited"""
queryset = Artist.objects.all()
serializer_class = ArtistSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
def get_queryset(self):
name = self.request.GET.get('name', "")
if name:
qs = get_similar(name)
else:
qs = super().get_queryset()
return qs[:100]
class SimilarViewSet(viewsets.ModelViewSet):
queryset = UserSimilarity.objects.all()
serializer_class = SimilaritySerializer
permission_classes = (permissions.IsAuthenticated,)
http_method_names = ['get', 'post', 'put', 'delete']
filter_fields = ['cc_artist']
def get_queryset(self):
return super().get_queryset().filter(user=self.request.user)
def pre_save(self, obj):
obj.user = self.request.user
|
2566b5698b17ca0d193ef6bf3b22aca91b711222 | alembic/env.py | alembic/env.py | from __future__ import with_statement
from alembic import context
import sqlalchemy
import logging
logging.basicConfig(level=logging.INFO, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
target_metadata = None
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(url=context.config.get_section_option("lrrbot", "postgres", 'postgres:///lrrbot'),
target_metadata=target_metadata, literal_binds=True)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = sqlalchemy.create_engine(context.config.get_section_option("lrrbot", "postgres", 'postgres:///lrrbot'))
with connectable.connect() as connection:
context.configure(connection=connection, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| from __future__ import with_statement
from alembic import context
import sqlalchemy
import logging
logging.basicConfig(level=logging.INFO, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
target_metadata = None
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(url=context.config.get_section_option("lrrbot", "postgres", 'postgresql:///lrrbot'),
target_metadata=target_metadata, literal_binds=True)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = sqlalchemy.create_engine(context.config.get_section_option("lrrbot", "postgres", 'postgresql:///lrrbot'))
with connectable.connect() as connection:
context.configure(connection=connection, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| Fix more db connection strings | Fix more db connection strings
| Python | apache-2.0 | mrphlip/lrrbot,mrphlip/lrrbot,andreasots/lrrbot,andreasots/lrrbot,andreasots/lrrbot,mrphlip/lrrbot | from __future__ import with_statement
from alembic import context
import sqlalchemy
import logging
logging.basicConfig(level=logging.INFO, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
target_metadata = None
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(url=context.config.get_section_option("lrrbot", "postgres", 'postgres:///lrrbot'),
target_metadata=target_metadata, literal_binds=True)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = sqlalchemy.create_engine(context.config.get_section_option("lrrbot", "postgres", 'postgres:///lrrbot'))
with connectable.connect() as connection:
context.configure(connection=connection, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
Fix more db connection strings | from __future__ import with_statement
from alembic import context
import sqlalchemy
import logging
logging.basicConfig(level=logging.INFO, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
target_metadata = None
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(url=context.config.get_section_option("lrrbot", "postgres", 'postgresql:///lrrbot'),
target_metadata=target_metadata, literal_binds=True)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = sqlalchemy.create_engine(context.config.get_section_option("lrrbot", "postgres", 'postgresql:///lrrbot'))
with connectable.connect() as connection:
context.configure(connection=connection, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| <commit_before>from __future__ import with_statement
from alembic import context
import sqlalchemy
import logging
logging.basicConfig(level=logging.INFO, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
target_metadata = None
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(url=context.config.get_section_option("lrrbot", "postgres", 'postgres:///lrrbot'),
target_metadata=target_metadata, literal_binds=True)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = sqlalchemy.create_engine(context.config.get_section_option("lrrbot", "postgres", 'postgres:///lrrbot'))
with connectable.connect() as connection:
context.configure(connection=connection, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
<commit_msg>Fix more db connection strings<commit_after> | from __future__ import with_statement
from alembic import context
import sqlalchemy
import logging
logging.basicConfig(level=logging.INFO, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
target_metadata = None
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(url=context.config.get_section_option("lrrbot", "postgres", 'postgresql:///lrrbot'),
target_metadata=target_metadata, literal_binds=True)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = sqlalchemy.create_engine(context.config.get_section_option("lrrbot", "postgres", 'postgresql:///lrrbot'))
with connectable.connect() as connection:
context.configure(connection=connection, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| from __future__ import with_statement
from alembic import context
import sqlalchemy
import logging
logging.basicConfig(level=logging.INFO, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
target_metadata = None
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(url=context.config.get_section_option("lrrbot", "postgres", 'postgres:///lrrbot'),
target_metadata=target_metadata, literal_binds=True)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = sqlalchemy.create_engine(context.config.get_section_option("lrrbot", "postgres", 'postgres:///lrrbot'))
with connectable.connect() as connection:
context.configure(connection=connection, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
Fix more db connection stringsfrom __future__ import with_statement
from alembic import context
import sqlalchemy
import logging
logging.basicConfig(level=logging.INFO, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
target_metadata = None
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(url=context.config.get_section_option("lrrbot", "postgres", 'postgresql:///lrrbot'),
target_metadata=target_metadata, literal_binds=True)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = sqlalchemy.create_engine(context.config.get_section_option("lrrbot", "postgres", 'postgresql:///lrrbot'))
with connectable.connect() as connection:
context.configure(connection=connection, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| <commit_before>from __future__ import with_statement
from alembic import context
import sqlalchemy
import logging
logging.basicConfig(level=logging.INFO, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
target_metadata = None
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(url=context.config.get_section_option("lrrbot", "postgres", 'postgres:///lrrbot'),
target_metadata=target_metadata, literal_binds=True)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = sqlalchemy.create_engine(context.config.get_section_option("lrrbot", "postgres", 'postgres:///lrrbot'))
with connectable.connect() as connection:
context.configure(connection=connection, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
<commit_msg>Fix more db connection strings<commit_after>from __future__ import with_statement
from alembic import context
import sqlalchemy
import logging
logging.basicConfig(level=logging.INFO, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
target_metadata = None
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
context.configure(url=context.config.get_section_option("lrrbot", "postgres", 'postgresql:///lrrbot'),
target_metadata=target_metadata, literal_binds=True)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = sqlalchemy.create_engine(context.config.get_section_option("lrrbot", "postgres", 'postgresql:///lrrbot'))
with connectable.connect() as connection:
context.configure(connection=connection, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
|
567a786e9d20c4519d72c37531d99e8d33b178af | my_module/metadata.py | my_module/metadata.py | # -*- coding: utf-8 -*-
"""Project metadata
Information describing the project.
"""
# The package name, which is also the "UNIX name" for the project.
package = 'my_module'
project = "My Awesome Module"
project_no_spaces = project.replace(' ', '')
version = '0.1'
description = 'It does cool things'
authors = ['Foo Bar', 'John Doe']
authors_string = ', '.join(authors)
emails = ['foobar@example.com', 'johndoe@thisisfake.org']
license = 'MIT'
copyright = '2013 ' + authors_string
url = 'http://example.com/'
| # -*- coding: utf-8 -*-
"""Project metadata
Information describing the project.
"""
# The package name, which is also the "UNIX name" for the project.
package = 'my_module'
project = "My Awesome Module"
project_no_spaces = project.replace(' ', '')
version = '0.1'
description = 'It does cool things'
authors = ['Foo Bar', 'John Doe']
authors_string = ', '.join(authors)
emails = ['foobar@example.com', 'johndoe@thisisfake.org']
license = 'MIT'
copyright = '2015 ' + authors_string
url = 'http://example.com/'
| Update copyright date to 2015. | Update copyright date to 2015. | Python | mit | mishin/python-project-template,seanfisk/python-project-template,seanfisk/python-project-template,shekkbuilder/python-project-template,mishin/python-project-template,shekkbuilder/python-project-template | # -*- coding: utf-8 -*-
"""Project metadata
Information describing the project.
"""
# The package name, which is also the "UNIX name" for the project.
package = 'my_module'
project = "My Awesome Module"
project_no_spaces = project.replace(' ', '')
version = '0.1'
description = 'It does cool things'
authors = ['Foo Bar', 'John Doe']
authors_string = ', '.join(authors)
emails = ['foobar@example.com', 'johndoe@thisisfake.org']
license = 'MIT'
copyright = '2013 ' + authors_string
url = 'http://example.com/'
Update copyright date to 2015. | # -*- coding: utf-8 -*-
"""Project metadata
Information describing the project.
"""
# The package name, which is also the "UNIX name" for the project.
package = 'my_module'
project = "My Awesome Module"
project_no_spaces = project.replace(' ', '')
version = '0.1'
description = 'It does cool things'
authors = ['Foo Bar', 'John Doe']
authors_string = ', '.join(authors)
emails = ['foobar@example.com', 'johndoe@thisisfake.org']
license = 'MIT'
copyright = '2015 ' + authors_string
url = 'http://example.com/'
| <commit_before># -*- coding: utf-8 -*-
"""Project metadata
Information describing the project.
"""
# The package name, which is also the "UNIX name" for the project.
package = 'my_module'
project = "My Awesome Module"
project_no_spaces = project.replace(' ', '')
version = '0.1'
description = 'It does cool things'
authors = ['Foo Bar', 'John Doe']
authors_string = ', '.join(authors)
emails = ['foobar@example.com', 'johndoe@thisisfake.org']
license = 'MIT'
copyright = '2013 ' + authors_string
url = 'http://example.com/'
<commit_msg>Update copyright date to 2015.<commit_after> | # -*- coding: utf-8 -*-
"""Project metadata
Information describing the project.
"""
# The package name, which is also the "UNIX name" for the project.
package = 'my_module'
project = "My Awesome Module"
project_no_spaces = project.replace(' ', '')
version = '0.1'
description = 'It does cool things'
authors = ['Foo Bar', 'John Doe']
authors_string = ', '.join(authors)
emails = ['foobar@example.com', 'johndoe@thisisfake.org']
license = 'MIT'
copyright = '2015 ' + authors_string
url = 'http://example.com/'
| # -*- coding: utf-8 -*-
"""Project metadata
Information describing the project.
"""
# The package name, which is also the "UNIX name" for the project.
package = 'my_module'
project = "My Awesome Module"
project_no_spaces = project.replace(' ', '')
version = '0.1'
description = 'It does cool things'
authors = ['Foo Bar', 'John Doe']
authors_string = ', '.join(authors)
emails = ['foobar@example.com', 'johndoe@thisisfake.org']
license = 'MIT'
copyright = '2013 ' + authors_string
url = 'http://example.com/'
Update copyright date to 2015.# -*- coding: utf-8 -*-
"""Project metadata
Information describing the project.
"""
# The package name, which is also the "UNIX name" for the project.
package = 'my_module'
project = "My Awesome Module"
project_no_spaces = project.replace(' ', '')
version = '0.1'
description = 'It does cool things'
authors = ['Foo Bar', 'John Doe']
authors_string = ', '.join(authors)
emails = ['foobar@example.com', 'johndoe@thisisfake.org']
license = 'MIT'
copyright = '2015 ' + authors_string
url = 'http://example.com/'
| <commit_before># -*- coding: utf-8 -*-
"""Project metadata
Information describing the project.
"""
# The package name, which is also the "UNIX name" for the project.
package = 'my_module'
project = "My Awesome Module"
project_no_spaces = project.replace(' ', '')
version = '0.1'
description = 'It does cool things'
authors = ['Foo Bar', 'John Doe']
authors_string = ', '.join(authors)
emails = ['foobar@example.com', 'johndoe@thisisfake.org']
license = 'MIT'
copyright = '2013 ' + authors_string
url = 'http://example.com/'
<commit_msg>Update copyright date to 2015.<commit_after># -*- coding: utf-8 -*-
"""Project metadata
Information describing the project.
"""
# The package name, which is also the "UNIX name" for the project.
package = 'my_module'
project = "My Awesome Module"
project_no_spaces = project.replace(' ', '')
version = '0.1'
description = 'It does cool things'
authors = ['Foo Bar', 'John Doe']
authors_string = ', '.join(authors)
emails = ['foobar@example.com', 'johndoe@thisisfake.org']
license = 'MIT'
copyright = '2015 ' + authors_string
url = 'http://example.com/'
|
6427ef6e05e3add17533c0a86603943c85020eb6 | inonemonth/challenges/templatetags/challenges_extras.py | inonemonth/challenges/templatetags/challenges_extras.py | from django.template import Library
register = Library()
@register.filter
def get_representation_for_user(role, user_role):
if user_role.type == "juror":
if role.type == "clencher":
return "Clencher (de.rouck.robrecht@gmail.com)"
elif role.type == "juror":
if role == user_role:
return "Juror 1 (me)"
else:
return "Juror 2"
else:
return Exception("Else Die")
elif user_role.type == "clencher":
if role.type == "clencher":
return "Clencher (me)"
elif role.type == "juror":
return "Juror 1 (andy.slacker@gmail.com)"
else:
return Exception("Else Die")
else:
return Exception("Else Die")
| from django.template import Library
register = Library()
@register.filter
def get_representation_for_user(role, user_role):
if user_role.type == "juror":
if role.type == "clencher":
return "{0} ({1})".format(role.type.capitalize(), role.user.email)
elif role.type == "juror":
if role == user_role:
return "Juror 1 (me)"
else:
return "Juror 2"
else:
return Exception("Else Die")
elif user_role.type == "clencher":
if role.type == "clencher":
return "Clencher (me)"
elif role.type == "juror":
return "Juror 1 (andy.slacker@gmail.com)"
else:
return Exception("Else Die")
else:
return Exception("Else Die")
| Increase abstractness for one test method | Increase abstractness for one test method
| Python | mit | robrechtdr/inonemonth,robrechtdr/inonemonth,robrechtdr/inonemonth,robrechtdr/inonemonth | from django.template import Library
register = Library()
@register.filter
def get_representation_for_user(role, user_role):
if user_role.type == "juror":
if role.type == "clencher":
return "Clencher (de.rouck.robrecht@gmail.com)"
elif role.type == "juror":
if role == user_role:
return "Juror 1 (me)"
else:
return "Juror 2"
else:
return Exception("Else Die")
elif user_role.type == "clencher":
if role.type == "clencher":
return "Clencher (me)"
elif role.type == "juror":
return "Juror 1 (andy.slacker@gmail.com)"
else:
return Exception("Else Die")
else:
return Exception("Else Die")
Increase abstractness for one test method | from django.template import Library
register = Library()
@register.filter
def get_representation_for_user(role, user_role):
if user_role.type == "juror":
if role.type == "clencher":
return "{0} ({1})".format(role.type.capitalize(), role.user.email)
elif role.type == "juror":
if role == user_role:
return "Juror 1 (me)"
else:
return "Juror 2"
else:
return Exception("Else Die")
elif user_role.type == "clencher":
if role.type == "clencher":
return "Clencher (me)"
elif role.type == "juror":
return "Juror 1 (andy.slacker@gmail.com)"
else:
return Exception("Else Die")
else:
return Exception("Else Die")
| <commit_before>from django.template import Library
register = Library()
@register.filter
def get_representation_for_user(role, user_role):
if user_role.type == "juror":
if role.type == "clencher":
return "Clencher (de.rouck.robrecht@gmail.com)"
elif role.type == "juror":
if role == user_role:
return "Juror 1 (me)"
else:
return "Juror 2"
else:
return Exception("Else Die")
elif user_role.type == "clencher":
if role.type == "clencher":
return "Clencher (me)"
elif role.type == "juror":
return "Juror 1 (andy.slacker@gmail.com)"
else:
return Exception("Else Die")
else:
return Exception("Else Die")
<commit_msg>Increase abstractness for one test method<commit_after> | from django.template import Library
register = Library()
@register.filter
def get_representation_for_user(role, user_role):
if user_role.type == "juror":
if role.type == "clencher":
return "{0} ({1})".format(role.type.capitalize(), role.user.email)
elif role.type == "juror":
if role == user_role:
return "Juror 1 (me)"
else:
return "Juror 2"
else:
return Exception("Else Die")
elif user_role.type == "clencher":
if role.type == "clencher":
return "Clencher (me)"
elif role.type == "juror":
return "Juror 1 (andy.slacker@gmail.com)"
else:
return Exception("Else Die")
else:
return Exception("Else Die")
| from django.template import Library
register = Library()
@register.filter
def get_representation_for_user(role, user_role):
if user_role.type == "juror":
if role.type == "clencher":
return "Clencher (de.rouck.robrecht@gmail.com)"
elif role.type == "juror":
if role == user_role:
return "Juror 1 (me)"
else:
return "Juror 2"
else:
return Exception("Else Die")
elif user_role.type == "clencher":
if role.type == "clencher":
return "Clencher (me)"
elif role.type == "juror":
return "Juror 1 (andy.slacker@gmail.com)"
else:
return Exception("Else Die")
else:
return Exception("Else Die")
Increase abstractness for one test methodfrom django.template import Library
register = Library()
@register.filter
def get_representation_for_user(role, user_role):
if user_role.type == "juror":
if role.type == "clencher":
return "{0} ({1})".format(role.type.capitalize(), role.user.email)
elif role.type == "juror":
if role == user_role:
return "Juror 1 (me)"
else:
return "Juror 2"
else:
return Exception("Else Die")
elif user_role.type == "clencher":
if role.type == "clencher":
return "Clencher (me)"
elif role.type == "juror":
return "Juror 1 (andy.slacker@gmail.com)"
else:
return Exception("Else Die")
else:
return Exception("Else Die")
| <commit_before>from django.template import Library
register = Library()
@register.filter
def get_representation_for_user(role, user_role):
if user_role.type == "juror":
if role.type == "clencher":
return "Clencher (de.rouck.robrecht@gmail.com)"
elif role.type == "juror":
if role == user_role:
return "Juror 1 (me)"
else:
return "Juror 2"
else:
return Exception("Else Die")
elif user_role.type == "clencher":
if role.type == "clencher":
return "Clencher (me)"
elif role.type == "juror":
return "Juror 1 (andy.slacker@gmail.com)"
else:
return Exception("Else Die")
else:
return Exception("Else Die")
<commit_msg>Increase abstractness for one test method<commit_after>from django.template import Library
register = Library()
@register.filter
def get_representation_for_user(role, user_role):
if user_role.type == "juror":
if role.type == "clencher":
return "{0} ({1})".format(role.type.capitalize(), role.user.email)
elif role.type == "juror":
if role == user_role:
return "Juror 1 (me)"
else:
return "Juror 2"
else:
return Exception("Else Die")
elif user_role.type == "clencher":
if role.type == "clencher":
return "Clencher (me)"
elif role.type == "juror":
return "Juror 1 (andy.slacker@gmail.com)"
else:
return Exception("Else Die")
else:
return Exception("Else Die")
|
bd3ff327bd04a6688ce7f39964394f921dec6705 | RevitPyCVC/Outils/copier_type_vues.py | RevitPyCVC/Outils/copier_type_vues.py | from Autodesk.Revit.DB import *
from Autodesk.Revit.DB.Architecture import *
from Autodesk.Revit.DB.Analysis import *
from System.Collections.Generic import *
uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document
getselection = uidoc.Selection.GetElementIds
app = __revit__.Application
from Autodesk.Revit.UI import TaskDialog
from Autodesk.Revit.UI import UIApplication
def alert(msg):
TaskDialog.Show('RevitPythonShell', msg)
def quit():
__window__.Close()
exit = quit
t = Transaction(doc, "Copie tous les gabarits de vue")
t.Start()
try:
ids = List[ElementId]()
for e in FilteredElementCollector(doc).OfClass(ViewFamilyType): #Cherche l'Id des éléments sélectionnés
ids.Add(e.Id)
ld = {}
for n, d in enumerate(app.Documents):
ld[n] = d.Title, d
for i in ld:
print i, ld[i][0]
autreDoc = ld[2][1]
cp_opts = CopyPasteOptions()
ElementTransformUtils.CopyElements(doc, ids, autreDoc, Transform.Identity, cp_opts)
t.Commit()
except:
# print a stack trace and error messages for debugging
import traceback
traceback.print_exc()
t.RollBack()
else:
# no errors, so just close the window
__window__.Close()
| from Autodesk.Revit.DB import *
from Autodesk.Revit.DB.Architecture import *
from Autodesk.Revit.DB.Analysis import *
from System.Collections.Generic import *
uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document
getselection = uidoc.Selection.GetElementIds
app = __revit__.Application
from Autodesk.Revit.UI import TaskDialog
from Autodesk.Revit.UI import UIApplication
def alert(msg):
TaskDialog.Show('RevitPythonShell', msg)
def quit():
__window__.Close()
exit = quit
t = Transaction(doc, "Copie tous les gabarits de vue")
t.Start()
try:
ids = List[ElementId]()
for e in FilteredElementCollector(doc).OfClass(ViewFamilyType): #Cherche l'Id des éléments sélectionnés
ids.Add(e.Id)
ld = {}
for n, d in enumerate(app.Documents):
ld[n] = d.Title, d
for i in ld:
print i, ld[i][0]
autreDoc = ld[2][1]
cp_opts = CopyPasteOptions()
ElementTransformUtils.CopyElements(doc, ids, autreDoc, Transform.Identity, cp_opts)
except:
# print a stack trace and error messages for debugging
import traceback
traceback.print_exc()
t.RollBack()
else:
# no errors, so just close the window
t.Commit() | Add GUI for 3D Rotation script | Add GUI for 3D Rotation script
| Python | mit | Nahouhak/pythoncvc.net,Nahouhak/pythoncvc.net | from Autodesk.Revit.DB import *
from Autodesk.Revit.DB.Architecture import *
from Autodesk.Revit.DB.Analysis import *
from System.Collections.Generic import *
uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document
getselection = uidoc.Selection.GetElementIds
app = __revit__.Application
from Autodesk.Revit.UI import TaskDialog
from Autodesk.Revit.UI import UIApplication
def alert(msg):
TaskDialog.Show('RevitPythonShell', msg)
def quit():
__window__.Close()
exit = quit
t = Transaction(doc, "Copie tous les gabarits de vue")
t.Start()
try:
ids = List[ElementId]()
for e in FilteredElementCollector(doc).OfClass(ViewFamilyType): #Cherche l'Id des éléments sélectionnés
ids.Add(e.Id)
ld = {}
for n, d in enumerate(app.Documents):
ld[n] = d.Title, d
for i in ld:
print i, ld[i][0]
autreDoc = ld[2][1]
cp_opts = CopyPasteOptions()
ElementTransformUtils.CopyElements(doc, ids, autreDoc, Transform.Identity, cp_opts)
t.Commit()
except:
# print a stack trace and error messages for debugging
import traceback
traceback.print_exc()
t.RollBack()
else:
# no errors, so just close the window
__window__.Close()
Add GUI for 3D Rotation script | from Autodesk.Revit.DB import *
from Autodesk.Revit.DB.Architecture import *
from Autodesk.Revit.DB.Analysis import *
from System.Collections.Generic import *
uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document
getselection = uidoc.Selection.GetElementIds
app = __revit__.Application
from Autodesk.Revit.UI import TaskDialog
from Autodesk.Revit.UI import UIApplication
def alert(msg):
TaskDialog.Show('RevitPythonShell', msg)
def quit():
__window__.Close()
exit = quit
t = Transaction(doc, "Copie tous les gabarits de vue")
t.Start()
try:
ids = List[ElementId]()
for e in FilteredElementCollector(doc).OfClass(ViewFamilyType): #Cherche l'Id des éléments sélectionnés
ids.Add(e.Id)
ld = {}
for n, d in enumerate(app.Documents):
ld[n] = d.Title, d
for i in ld:
print i, ld[i][0]
autreDoc = ld[2][1]
cp_opts = CopyPasteOptions()
ElementTransformUtils.CopyElements(doc, ids, autreDoc, Transform.Identity, cp_opts)
except:
# print a stack trace and error messages for debugging
import traceback
traceback.print_exc()
t.RollBack()
else:
# no errors, so just close the window
t.Commit() | <commit_before>from Autodesk.Revit.DB import *
from Autodesk.Revit.DB.Architecture import *
from Autodesk.Revit.DB.Analysis import *
from System.Collections.Generic import *
uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document
getselection = uidoc.Selection.GetElementIds
app = __revit__.Application
from Autodesk.Revit.UI import TaskDialog
from Autodesk.Revit.UI import UIApplication
def alert(msg):
TaskDialog.Show('RevitPythonShell', msg)
def quit():
__window__.Close()
exit = quit
t = Transaction(doc, "Copie tous les gabarits de vue")
t.Start()
try:
ids = List[ElementId]()
for e in FilteredElementCollector(doc).OfClass(ViewFamilyType): #Cherche l'Id des éléments sélectionnés
ids.Add(e.Id)
ld = {}
for n, d in enumerate(app.Documents):
ld[n] = d.Title, d
for i in ld:
print i, ld[i][0]
autreDoc = ld[2][1]
cp_opts = CopyPasteOptions()
ElementTransformUtils.CopyElements(doc, ids, autreDoc, Transform.Identity, cp_opts)
t.Commit()
except:
# print a stack trace and error messages for debugging
import traceback
traceback.print_exc()
t.RollBack()
else:
# no errors, so just close the window
__window__.Close()
<commit_msg>Add GUI for 3D Rotation script<commit_after> | from Autodesk.Revit.DB import *
from Autodesk.Revit.DB.Architecture import *
from Autodesk.Revit.DB.Analysis import *
from System.Collections.Generic import *
uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document
getselection = uidoc.Selection.GetElementIds
app = __revit__.Application
from Autodesk.Revit.UI import TaskDialog
from Autodesk.Revit.UI import UIApplication
def alert(msg):
TaskDialog.Show('RevitPythonShell', msg)
def quit():
__window__.Close()
exit = quit
t = Transaction(doc, "Copie tous les gabarits de vue")
t.Start()
try:
ids = List[ElementId]()
for e in FilteredElementCollector(doc).OfClass(ViewFamilyType): #Cherche l'Id des éléments sélectionnés
ids.Add(e.Id)
ld = {}
for n, d in enumerate(app.Documents):
ld[n] = d.Title, d
for i in ld:
print i, ld[i][0]
autreDoc = ld[2][1]
cp_opts = CopyPasteOptions()
ElementTransformUtils.CopyElements(doc, ids, autreDoc, Transform.Identity, cp_opts)
except:
# print a stack trace and error messages for debugging
import traceback
traceback.print_exc()
t.RollBack()
else:
# no errors, so just close the window
t.Commit() | from Autodesk.Revit.DB import *
from Autodesk.Revit.DB.Architecture import *
from Autodesk.Revit.DB.Analysis import *
from System.Collections.Generic import *
uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document
getselection = uidoc.Selection.GetElementIds
app = __revit__.Application
from Autodesk.Revit.UI import TaskDialog
from Autodesk.Revit.UI import UIApplication
def alert(msg):
TaskDialog.Show('RevitPythonShell', msg)
def quit():
__window__.Close()
exit = quit
t = Transaction(doc, "Copie tous les gabarits de vue")
t.Start()
try:
ids = List[ElementId]()
for e in FilteredElementCollector(doc).OfClass(ViewFamilyType): #Cherche l'Id des éléments sélectionnés
ids.Add(e.Id)
ld = {}
for n, d in enumerate(app.Documents):
ld[n] = d.Title, d
for i in ld:
print i, ld[i][0]
autreDoc = ld[2][1]
cp_opts = CopyPasteOptions()
ElementTransformUtils.CopyElements(doc, ids, autreDoc, Transform.Identity, cp_opts)
t.Commit()
except:
# print a stack trace and error messages for debugging
import traceback
traceback.print_exc()
t.RollBack()
else:
# no errors, so just close the window
__window__.Close()
Add GUI for 3D Rotation scriptfrom Autodesk.Revit.DB import *
from Autodesk.Revit.DB.Architecture import *
from Autodesk.Revit.DB.Analysis import *
from System.Collections.Generic import *
uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document
getselection = uidoc.Selection.GetElementIds
app = __revit__.Application
from Autodesk.Revit.UI import TaskDialog
from Autodesk.Revit.UI import UIApplication
def alert(msg):
TaskDialog.Show('RevitPythonShell', msg)
def quit():
__window__.Close()
exit = quit
t = Transaction(doc, "Copie tous les gabarits de vue")
t.Start()
try:
ids = List[ElementId]()
for e in FilteredElementCollector(doc).OfClass(ViewFamilyType): #Cherche l'Id des éléments sélectionnés
ids.Add(e.Id)
ld = {}
for n, d in enumerate(app.Documents):
ld[n] = d.Title, d
for i in ld:
print i, ld[i][0]
autreDoc = ld[2][1]
cp_opts = CopyPasteOptions()
ElementTransformUtils.CopyElements(doc, ids, autreDoc, Transform.Identity, cp_opts)
except:
# print a stack trace and error messages for debugging
import traceback
traceback.print_exc()
t.RollBack()
else:
# no errors, so just close the window
t.Commit() | <commit_before>from Autodesk.Revit.DB import *
from Autodesk.Revit.DB.Architecture import *
from Autodesk.Revit.DB.Analysis import *
from System.Collections.Generic import *
uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document
getselection = uidoc.Selection.GetElementIds
app = __revit__.Application
from Autodesk.Revit.UI import TaskDialog
from Autodesk.Revit.UI import UIApplication
def alert(msg):
TaskDialog.Show('RevitPythonShell', msg)
def quit():
__window__.Close()
exit = quit
t = Transaction(doc, "Copie tous les gabarits de vue")
t.Start()
try:
ids = List[ElementId]()
for e in FilteredElementCollector(doc).OfClass(ViewFamilyType): #Cherche l'Id des éléments sélectionnés
ids.Add(e.Id)
ld = {}
for n, d in enumerate(app.Documents):
ld[n] = d.Title, d
for i in ld:
print i, ld[i][0]
autreDoc = ld[2][1]
cp_opts = CopyPasteOptions()
ElementTransformUtils.CopyElements(doc, ids, autreDoc, Transform.Identity, cp_opts)
t.Commit()
except:
# print a stack trace and error messages for debugging
import traceback
traceback.print_exc()
t.RollBack()
else:
# no errors, so just close the window
__window__.Close()
<commit_msg>Add GUI for 3D Rotation script<commit_after>from Autodesk.Revit.DB import *
from Autodesk.Revit.DB.Architecture import *
from Autodesk.Revit.DB.Analysis import *
from System.Collections.Generic import *
uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document
getselection = uidoc.Selection.GetElementIds
app = __revit__.Application
from Autodesk.Revit.UI import TaskDialog
from Autodesk.Revit.UI import UIApplication
def alert(msg):
TaskDialog.Show('RevitPythonShell', msg)
def quit():
__window__.Close()
exit = quit
t = Transaction(doc, "Copie tous les gabarits de vue")
t.Start()
try:
ids = List[ElementId]()
for e in FilteredElementCollector(doc).OfClass(ViewFamilyType): #Cherche l'Id des éléments sélectionnés
ids.Add(e.Id)
ld = {}
for n, d in enumerate(app.Documents):
ld[n] = d.Title, d
for i in ld:
print i, ld[i][0]
autreDoc = ld[2][1]
cp_opts = CopyPasteOptions()
ElementTransformUtils.CopyElements(doc, ids, autreDoc, Transform.Identity, cp_opts)
except:
# print a stack trace and error messages for debugging
import traceback
traceback.print_exc()
t.RollBack()
else:
# no errors, so just close the window
t.Commit() |
b158de35c08aa78578f374f125884607468e67d1 | glance/registry/__init__.py | glance/registry/__init__.py | # Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Registry API
"""
from oslo_config import cfg
from glance.i18n import _
registry_addr_opts = [
cfg.StrOpt('registry_host', default='0.0.0.0',
help=_('Address to find the registry server.')),
cfg.PortOpt('registry_port', default=9191,
help=_('Port the registry server is listening on.')),
]
CONF = cfg.CONF
CONF.register_opts(registry_addr_opts)
| # Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Registry API
"""
from oslo_config import cfg
from glance.i18n import _
registry_addr_opts = [
cfg.StrOpt('registry_host', default='0.0.0.0',
help=_("""
Address the registry server is hosted on.
Possible values:
* A valid IP or hostname
Related options:
* None
""")),
cfg.PortOpt('registry_port', default=9191,
help=_("""
Port the registry server is listening on.
Possible values:
* A valid port number
Related options:
* None
""")),
]
CONF = cfg.CONF
CONF.register_opts(registry_addr_opts)
| Improve help text of registry server opts | Improve help text of registry server opts
Partial-Bug: #1570946
Change-Id: Iad255d3ab5d96b91f897731f4f29cd804d6b1840
| Python | apache-2.0 | openstack/glance,rajalokan/glance,rajalokan/glance,stevelle/glance,openstack/glance,openstack/glance,stevelle/glance | # Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Registry API
"""
from oslo_config import cfg
from glance.i18n import _
registry_addr_opts = [
cfg.StrOpt('registry_host', default='0.0.0.0',
help=_('Address to find the registry server.')),
cfg.PortOpt('registry_port', default=9191,
help=_('Port the registry server is listening on.')),
]
CONF = cfg.CONF
CONF.register_opts(registry_addr_opts)
Improve help text of registry server opts
Partial-Bug: #1570946
Change-Id: Iad255d3ab5d96b91f897731f4f29cd804d6b1840 | # Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Registry API
"""
from oslo_config import cfg
from glance.i18n import _
registry_addr_opts = [
cfg.StrOpt('registry_host', default='0.0.0.0',
help=_("""
Address the registry server is hosted on.
Possible values:
* A valid IP or hostname
Related options:
* None
""")),
cfg.PortOpt('registry_port', default=9191,
help=_("""
Port the registry server is listening on.
Possible values:
* A valid port number
Related options:
* None
""")),
]
CONF = cfg.CONF
CONF.register_opts(registry_addr_opts)
| <commit_before># Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Registry API
"""
from oslo_config import cfg
from glance.i18n import _
registry_addr_opts = [
cfg.StrOpt('registry_host', default='0.0.0.0',
help=_('Address to find the registry server.')),
cfg.PortOpt('registry_port', default=9191,
help=_('Port the registry server is listening on.')),
]
CONF = cfg.CONF
CONF.register_opts(registry_addr_opts)
<commit_msg>Improve help text of registry server opts
Partial-Bug: #1570946
Change-Id: Iad255d3ab5d96b91f897731f4f29cd804d6b1840<commit_after> | # Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Registry API
"""
from oslo_config import cfg
from glance.i18n import _
registry_addr_opts = [
cfg.StrOpt('registry_host', default='0.0.0.0',
help=_("""
Address the registry server is hosted on.
Possible values:
* A valid IP or hostname
Related options:
* None
""")),
cfg.PortOpt('registry_port', default=9191,
help=_("""
Port the registry server is listening on.
Possible values:
* A valid port number
Related options:
* None
""")),
]
CONF = cfg.CONF
CONF.register_opts(registry_addr_opts)
| # Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Registry API
"""
from oslo_config import cfg
from glance.i18n import _
registry_addr_opts = [
cfg.StrOpt('registry_host', default='0.0.0.0',
help=_('Address to find the registry server.')),
cfg.PortOpt('registry_port', default=9191,
help=_('Port the registry server is listening on.')),
]
CONF = cfg.CONF
CONF.register_opts(registry_addr_opts)
Improve help text of registry server opts
Partial-Bug: #1570946
Change-Id: Iad255d3ab5d96b91f897731f4f29cd804d6b1840# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Registry API
"""
from oslo_config import cfg
from glance.i18n import _
registry_addr_opts = [
cfg.StrOpt('registry_host', default='0.0.0.0',
help=_("""
Address the registry server is hosted on.
Possible values:
* A valid IP or hostname
Related options:
* None
""")),
cfg.PortOpt('registry_port', default=9191,
help=_("""
Port the registry server is listening on.
Possible values:
* A valid port number
Related options:
* None
""")),
]
CONF = cfg.CONF
CONF.register_opts(registry_addr_opts)
| <commit_before># Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Registry API
"""
from oslo_config import cfg
from glance.i18n import _
registry_addr_opts = [
cfg.StrOpt('registry_host', default='0.0.0.0',
help=_('Address to find the registry server.')),
cfg.PortOpt('registry_port', default=9191,
help=_('Port the registry server is listening on.')),
]
CONF = cfg.CONF
CONF.register_opts(registry_addr_opts)
<commit_msg>Improve help text of registry server opts
Partial-Bug: #1570946
Change-Id: Iad255d3ab5d96b91f897731f4f29cd804d6b1840<commit_after># Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Registry API
"""
from oslo_config import cfg
from glance.i18n import _
registry_addr_opts = [
cfg.StrOpt('registry_host', default='0.0.0.0',
help=_("""
Address the registry server is hosted on.
Possible values:
* A valid IP or hostname
Related options:
* None
""")),
cfg.PortOpt('registry_port', default=9191,
help=_("""
Port the registry server is listening on.
Possible values:
* A valid port number
Related options:
* None
""")),
]
CONF = cfg.CONF
CONF.register_opts(registry_addr_opts)
|
e8d2d067c79bf133f98312c328a0e72af6b65f96 | go/apps/jsbox/definition.py | go/apps/jsbox/definition.py | from go.vumitools.conversation.definition import (
ConversationDefinitionBase, ConversationAction)
class ViewLogsAction(ConversationAction):
action_name = 'view_logs'
action_display_name = 'View Sandbox Logs'
redirect_to = 'jsbox_logs'
class ConversationDefinition(ConversationDefinitionBase):
conversation_type = 'jsbox'
conversation_display_name = 'Javascript App'
actions = (ViewLogsAction,)
def configured_endpoints(self, config):
return []
| from go.vumitools.conversation.definition import (
ConversationDefinitionBase, ConversationAction)
class ViewLogsAction(ConversationAction):
action_name = 'view_logs'
action_display_name = 'View Sandbox Logs'
redirect_to = 'jsbox_logs'
class ConversationDefinition(ConversationDefinitionBase):
conversation_type = 'jsbox'
conversation_display_name = 'Javascript App'
actions = (ViewLogsAction,)
def configured_endpoints(self, config):
# TODO: make jsbox apps define these explicitly and
# update the outbound resource to check and
# complain if a jsbox app sends on an endpoint
# it hasn't defined.
app_config = config.get("jsbox_app_config", {})
sandbox_config = app_config.get("config", {})
sms_tag = sandbox_config.get("sms_tag")
try:
pool, tag = sms_tag
except Exception:
return []
return ["%s:%s" % (pool, tag)]
| Implement extra endpoints for Javascript apps. | Implement extra endpoints for Javascript apps.
| Python | bsd-3-clause | praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go | from go.vumitools.conversation.definition import (
ConversationDefinitionBase, ConversationAction)
class ViewLogsAction(ConversationAction):
action_name = 'view_logs'
action_display_name = 'View Sandbox Logs'
redirect_to = 'jsbox_logs'
class ConversationDefinition(ConversationDefinitionBase):
conversation_type = 'jsbox'
conversation_display_name = 'Javascript App'
actions = (ViewLogsAction,)
def configured_endpoints(self, config):
return []
Implement extra endpoints for Javascript apps. | from go.vumitools.conversation.definition import (
ConversationDefinitionBase, ConversationAction)
class ViewLogsAction(ConversationAction):
action_name = 'view_logs'
action_display_name = 'View Sandbox Logs'
redirect_to = 'jsbox_logs'
class ConversationDefinition(ConversationDefinitionBase):
conversation_type = 'jsbox'
conversation_display_name = 'Javascript App'
actions = (ViewLogsAction,)
def configured_endpoints(self, config):
# TODO: make jsbox apps define these explicitly and
# update the outbound resource to check and
# complain if a jsbox app sends on an endpoint
# it hasn't defined.
app_config = config.get("jsbox_app_config", {})
sandbox_config = app_config.get("config", {})
sms_tag = sandbox_config.get("sms_tag")
try:
pool, tag = sms_tag
except Exception:
return []
return ["%s:%s" % (pool, tag)]
| <commit_before>from go.vumitools.conversation.definition import (
ConversationDefinitionBase, ConversationAction)
class ViewLogsAction(ConversationAction):
action_name = 'view_logs'
action_display_name = 'View Sandbox Logs'
redirect_to = 'jsbox_logs'
class ConversationDefinition(ConversationDefinitionBase):
conversation_type = 'jsbox'
conversation_display_name = 'Javascript App'
actions = (ViewLogsAction,)
def configured_endpoints(self, config):
return []
<commit_msg>Implement extra endpoints for Javascript apps.<commit_after> | from go.vumitools.conversation.definition import (
ConversationDefinitionBase, ConversationAction)
class ViewLogsAction(ConversationAction):
action_name = 'view_logs'
action_display_name = 'View Sandbox Logs'
redirect_to = 'jsbox_logs'
class ConversationDefinition(ConversationDefinitionBase):
conversation_type = 'jsbox'
conversation_display_name = 'Javascript App'
actions = (ViewLogsAction,)
def configured_endpoints(self, config):
# TODO: make jsbox apps define these explicitly and
# update the outbound resource to check and
# complain if a jsbox app sends on an endpoint
# it hasn't defined.
app_config = config.get("jsbox_app_config", {})
sandbox_config = app_config.get("config", {})
sms_tag = sandbox_config.get("sms_tag")
try:
pool, tag = sms_tag
except Exception:
return []
return ["%s:%s" % (pool, tag)]
| from go.vumitools.conversation.definition import (
ConversationDefinitionBase, ConversationAction)
class ViewLogsAction(ConversationAction):
action_name = 'view_logs'
action_display_name = 'View Sandbox Logs'
redirect_to = 'jsbox_logs'
class ConversationDefinition(ConversationDefinitionBase):
conversation_type = 'jsbox'
conversation_display_name = 'Javascript App'
actions = (ViewLogsAction,)
def configured_endpoints(self, config):
return []
Implement extra endpoints for Javascript apps.from go.vumitools.conversation.definition import (
ConversationDefinitionBase, ConversationAction)
class ViewLogsAction(ConversationAction):
action_name = 'view_logs'
action_display_name = 'View Sandbox Logs'
redirect_to = 'jsbox_logs'
class ConversationDefinition(ConversationDefinitionBase):
conversation_type = 'jsbox'
conversation_display_name = 'Javascript App'
actions = (ViewLogsAction,)
def configured_endpoints(self, config):
# TODO: make jsbox apps define these explicitly and
# update the outbound resource to check and
# complain if a jsbox app sends on an endpoint
# it hasn't defined.
app_config = config.get("jsbox_app_config", {})
sandbox_config = app_config.get("config", {})
sms_tag = sandbox_config.get("sms_tag")
try:
pool, tag = sms_tag
except Exception:
return []
return ["%s:%s" % (pool, tag)]
| <commit_before>from go.vumitools.conversation.definition import (
ConversationDefinitionBase, ConversationAction)
class ViewLogsAction(ConversationAction):
action_name = 'view_logs'
action_display_name = 'View Sandbox Logs'
redirect_to = 'jsbox_logs'
class ConversationDefinition(ConversationDefinitionBase):
conversation_type = 'jsbox'
conversation_display_name = 'Javascript App'
actions = (ViewLogsAction,)
def configured_endpoints(self, config):
return []
<commit_msg>Implement extra endpoints for Javascript apps.<commit_after>from go.vumitools.conversation.definition import (
ConversationDefinitionBase, ConversationAction)
class ViewLogsAction(ConversationAction):
action_name = 'view_logs'
action_display_name = 'View Sandbox Logs'
redirect_to = 'jsbox_logs'
class ConversationDefinition(ConversationDefinitionBase):
conversation_type = 'jsbox'
conversation_display_name = 'Javascript App'
actions = (ViewLogsAction,)
def configured_endpoints(self, config):
# TODO: make jsbox apps define these explicitly and
# update the outbound resource to check and
# complain if a jsbox app sends on an endpoint
# it hasn't defined.
app_config = config.get("jsbox_app_config", {})
sandbox_config = app_config.get("config", {})
sms_tag = sandbox_config.get("sms_tag")
try:
pool, tag = sms_tag
except Exception:
return []
return ["%s:%s" % (pool, tag)]
|
62c573fadad1b0268353c2dc21c35ac5b645052a | go/dashboard/tests/utils.py | go/dashboard/tests/utils.py | import json
from go.dashboard import DiamondashApiError, DiamondashApiClient
class FakeDiamondashApiClient(DiamondashApiClient):
def __init__(self):
self.requests = []
self.response = None
def get_requests(self):
return self.requests
def set_error_response(self, code, message):
data = json.dumps({
'success': False,
'message': message
})
self.response = DiamondashApiError("(%s) %s" % (code, data))
def set_response(self, response):
self.response = response
def request(self, method, url, data):
self.requests.append({
'method': method,
'url': url,
'data': data,
})
if isinstance(self.response, Exception):
raise self.response
return self.response
| import json
from go.dashboard import DiamondashApiError, DiamondashApiClient
class FakeDiamondashApiClient(DiamondashApiClient):
def __init__(self):
self.requests = []
self._response = None
@property
def response(self):
if isinstance(self._response, Exception):
raise self._response
return self._response
def set_error_response(self, code, message):
data = json.dumps({
'success': False,
'message': message
})
self._response = DiamondashApiError("(%s) %s" % (code, data))
def set_response(self, response):
self._response = response
def get_requests(self):
return self.requests
def request(self, method, url, data):
self.requests.append({
'method': method,
'url': url,
'data': data,
})
return self.response
def raw_request(self, method, url, content=""):
self.requests.append({
'method': method,
'url': url,
'content': content,
})
return self.response
| Add raw_request method for FakeDiamondashApiClient to correspond to the recently added DiamondashApiClient.raw_request() | Add raw_request method for FakeDiamondashApiClient to correspond to the recently added DiamondashApiClient.raw_request()
| Python | bsd-3-clause | praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go | import json
from go.dashboard import DiamondashApiError, DiamondashApiClient
class FakeDiamondashApiClient(DiamondashApiClient):
def __init__(self):
self.requests = []
self.response = None
def get_requests(self):
return self.requests
def set_error_response(self, code, message):
data = json.dumps({
'success': False,
'message': message
})
self.response = DiamondashApiError("(%s) %s" % (code, data))
def set_response(self, response):
self.response = response
def request(self, method, url, data):
self.requests.append({
'method': method,
'url': url,
'data': data,
})
if isinstance(self.response, Exception):
raise self.response
return self.response
Add raw_request method for FakeDiamondashApiClient to correspond to the recently added DiamondashApiClient.raw_request() | import json
from go.dashboard import DiamondashApiError, DiamondashApiClient
class FakeDiamondashApiClient(DiamondashApiClient):
def __init__(self):
self.requests = []
self._response = None
@property
def response(self):
if isinstance(self._response, Exception):
raise self._response
return self._response
def set_error_response(self, code, message):
data = json.dumps({
'success': False,
'message': message
})
self._response = DiamondashApiError("(%s) %s" % (code, data))
def set_response(self, response):
self._response = response
def get_requests(self):
return self.requests
def request(self, method, url, data):
self.requests.append({
'method': method,
'url': url,
'data': data,
})
return self.response
def raw_request(self, method, url, content=""):
self.requests.append({
'method': method,
'url': url,
'content': content,
})
return self.response
| <commit_before>import json
from go.dashboard import DiamondashApiError, DiamondashApiClient
class FakeDiamondashApiClient(DiamondashApiClient):
def __init__(self):
self.requests = []
self.response = None
def get_requests(self):
return self.requests
def set_error_response(self, code, message):
data = json.dumps({
'success': False,
'message': message
})
self.response = DiamondashApiError("(%s) %s" % (code, data))
def set_response(self, response):
self.response = response
def request(self, method, url, data):
self.requests.append({
'method': method,
'url': url,
'data': data,
})
if isinstance(self.response, Exception):
raise self.response
return self.response
<commit_msg>Add raw_request method for FakeDiamondashApiClient to correspond to the recently added DiamondashApiClient.raw_request()<commit_after> | import json
from go.dashboard import DiamondashApiError, DiamondashApiClient
class FakeDiamondashApiClient(DiamondashApiClient):
def __init__(self):
self.requests = []
self._response = None
@property
def response(self):
if isinstance(self._response, Exception):
raise self._response
return self._response
def set_error_response(self, code, message):
data = json.dumps({
'success': False,
'message': message
})
self._response = DiamondashApiError("(%s) %s" % (code, data))
def set_response(self, response):
self._response = response
def get_requests(self):
return self.requests
def request(self, method, url, data):
self.requests.append({
'method': method,
'url': url,
'data': data,
})
return self.response
def raw_request(self, method, url, content=""):
self.requests.append({
'method': method,
'url': url,
'content': content,
})
return self.response
| import json
from go.dashboard import DiamondashApiError, DiamondashApiClient
class FakeDiamondashApiClient(DiamondashApiClient):
def __init__(self):
self.requests = []
self.response = None
def get_requests(self):
return self.requests
def set_error_response(self, code, message):
data = json.dumps({
'success': False,
'message': message
})
self.response = DiamondashApiError("(%s) %s" % (code, data))
def set_response(self, response):
self.response = response
def request(self, method, url, data):
self.requests.append({
'method': method,
'url': url,
'data': data,
})
if isinstance(self.response, Exception):
raise self.response
return self.response
Add raw_request method for FakeDiamondashApiClient to correspond to the recently added DiamondashApiClient.raw_request()import json
from go.dashboard import DiamondashApiError, DiamondashApiClient
class FakeDiamondashApiClient(DiamondashApiClient):
def __init__(self):
self.requests = []
self._response = None
@property
def response(self):
if isinstance(self._response, Exception):
raise self._response
return self._response
def set_error_response(self, code, message):
data = json.dumps({
'success': False,
'message': message
})
self._response = DiamondashApiError("(%s) %s" % (code, data))
def set_response(self, response):
self._response = response
def get_requests(self):
return self.requests
def request(self, method, url, data):
self.requests.append({
'method': method,
'url': url,
'data': data,
})
return self.response
def raw_request(self, method, url, content=""):
self.requests.append({
'method': method,
'url': url,
'content': content,
})
return self.response
| <commit_before>import json
from go.dashboard import DiamondashApiError, DiamondashApiClient
class FakeDiamondashApiClient(DiamondashApiClient):
def __init__(self):
self.requests = []
self.response = None
def get_requests(self):
return self.requests
def set_error_response(self, code, message):
data = json.dumps({
'success': False,
'message': message
})
self.response = DiamondashApiError("(%s) %s" % (code, data))
def set_response(self, response):
self.response = response
def request(self, method, url, data):
self.requests.append({
'method': method,
'url': url,
'data': data,
})
if isinstance(self.response, Exception):
raise self.response
return self.response
<commit_msg>Add raw_request method for FakeDiamondashApiClient to correspond to the recently added DiamondashApiClient.raw_request()<commit_after>import json
from go.dashboard import DiamondashApiError, DiamondashApiClient
class FakeDiamondashApiClient(DiamondashApiClient):
def __init__(self):
self.requests = []
self._response = None
@property
def response(self):
if isinstance(self._response, Exception):
raise self._response
return self._response
def set_error_response(self, code, message):
data = json.dumps({
'success': False,
'message': message
})
self._response = DiamondashApiError("(%s) %s" % (code, data))
def set_response(self, response):
self._response = response
def get_requests(self):
return self.requests
def request(self, method, url, data):
self.requests.append({
'method': method,
'url': url,
'data': data,
})
return self.response
def raw_request(self, method, url, content=""):
self.requests.append({
'method': method,
'url': url,
'content': content,
})
return self.response
|
17a18f72e9e2a7df43d2dafe77a17bfe4777d7aa | avena/image.py | avena/image.py | #!/usr/bin/env python2
'''Read and write image files as NumPy arrays'''
from numpy import asarray, float32
from PIL import Image
from . import np
from . import utils
_DEFAULT_DTYPE = float32
_PIL_RGB = {
'R': 0,
'G': 1,
'B': 2,
}
def get_channels(img):
'''Return a list of channels of an image array.'''
if utils.depth(img) == 1:
yield img
else:
for i in xrange(utils.depth(img)):
yield img[:, :, i]
def read(filename, dtype=_DEFAULT_DTYPE):
'''Read an image file as an array.'''
img = Image.open(filename)
arr = asarray(img, dtype=dtype)
arr = utils.swap_rgb(arr, _PIL_RGB)
return arr
def _pil_save(img, filename):
pil_img = Image.fromarray(img)
pil_img.save(filename)
return
def save(img, filename, random=False):
'''Save an image array and return its path.'''
if random:
newfile = utils.rand_filename(filename)
else:
newfile = filename
np.normalize(img)
uint8img = np.to_uint8(img)
_pil_save(uint8img, newfile)
return newfile
if __name__ == '__main__':
pass
| #!/usr/bin/env python2
'''Read and write image files as NumPy arrays'''
from numpy import asarray, float32
from PIL import Image
from . import np
from . import utils
_DEFAULT_DTYPE = float32
_PIL_RGB = {
'R': 0,
'G': 1,
'B': 2,
}
def get_channels(img):
'''Return a list of channels of an image array.'''
if utils.depth(img) == 1:
yield img
else:
for i in xrange(utils.depth(img)):
yield img[:, :, i]
def read(filename, dtype=_DEFAULT_DTYPE):
'''Read an image file as an array.'''
img = Image.open(filename)
arr = asarray(img, dtype=dtype)
arr = utils.swap_rgb(arr, _PIL_RGB)
return arr
def _pil_save(img, filename):
pil_img = Image.fromarray(img)
pil_img.save(filename)
return
def save(img, filename, random=False, ext=None):
'''Save an image array and return its path.'''
if random:
newfile = utils.rand_filename(filename, ext=ext)
else:
newfile = filename
np.normalize(img)
uint8img = np.to_uint8(img)
_pil_save(uint8img, newfile)
return newfile
if __name__ == '__main__':
pass
| Add an extension parameter to the save function. | Add an extension parameter to the save function.
| Python | isc | eliteraspberries/avena | #!/usr/bin/env python2
'''Read and write image files as NumPy arrays'''
from numpy import asarray, float32
from PIL import Image
from . import np
from . import utils
_DEFAULT_DTYPE = float32
_PIL_RGB = {
'R': 0,
'G': 1,
'B': 2,
}
def get_channels(img):
'''Return a list of channels of an image array.'''
if utils.depth(img) == 1:
yield img
else:
for i in xrange(utils.depth(img)):
yield img[:, :, i]
def read(filename, dtype=_DEFAULT_DTYPE):
'''Read an image file as an array.'''
img = Image.open(filename)
arr = asarray(img, dtype=dtype)
arr = utils.swap_rgb(arr, _PIL_RGB)
return arr
def _pil_save(img, filename):
pil_img = Image.fromarray(img)
pil_img.save(filename)
return
def save(img, filename, random=False):
'''Save an image array and return its path.'''
if random:
newfile = utils.rand_filename(filename)
else:
newfile = filename
np.normalize(img)
uint8img = np.to_uint8(img)
_pil_save(uint8img, newfile)
return newfile
if __name__ == '__main__':
pass
Add an extension parameter to the save function. | #!/usr/bin/env python2
'''Read and write image files as NumPy arrays'''
from numpy import asarray, float32
from PIL import Image
from . import np
from . import utils
_DEFAULT_DTYPE = float32
_PIL_RGB = {
'R': 0,
'G': 1,
'B': 2,
}
def get_channels(img):
'''Return a list of channels of an image array.'''
if utils.depth(img) == 1:
yield img
else:
for i in xrange(utils.depth(img)):
yield img[:, :, i]
def read(filename, dtype=_DEFAULT_DTYPE):
'''Read an image file as an array.'''
img = Image.open(filename)
arr = asarray(img, dtype=dtype)
arr = utils.swap_rgb(arr, _PIL_RGB)
return arr
def _pil_save(img, filename):
pil_img = Image.fromarray(img)
pil_img.save(filename)
return
def save(img, filename, random=False, ext=None):
'''Save an image array and return its path.'''
if random:
newfile = utils.rand_filename(filename, ext=ext)
else:
newfile = filename
np.normalize(img)
uint8img = np.to_uint8(img)
_pil_save(uint8img, newfile)
return newfile
if __name__ == '__main__':
pass
| <commit_before>#!/usr/bin/env python2
'''Read and write image files as NumPy arrays'''
from numpy import asarray, float32
from PIL import Image
from . import np
from . import utils
_DEFAULT_DTYPE = float32
_PIL_RGB = {
'R': 0,
'G': 1,
'B': 2,
}
def get_channels(img):
'''Return a list of channels of an image array.'''
if utils.depth(img) == 1:
yield img
else:
for i in xrange(utils.depth(img)):
yield img[:, :, i]
def read(filename, dtype=_DEFAULT_DTYPE):
'''Read an image file as an array.'''
img = Image.open(filename)
arr = asarray(img, dtype=dtype)
arr = utils.swap_rgb(arr, _PIL_RGB)
return arr
def _pil_save(img, filename):
pil_img = Image.fromarray(img)
pil_img.save(filename)
return
def save(img, filename, random=False):
'''Save an image array and return its path.'''
if random:
newfile = utils.rand_filename(filename)
else:
newfile = filename
np.normalize(img)
uint8img = np.to_uint8(img)
_pil_save(uint8img, newfile)
return newfile
if __name__ == '__main__':
pass
<commit_msg>Add an extension parameter to the save function.<commit_after> | #!/usr/bin/env python2
'''Read and write image files as NumPy arrays'''
from numpy import asarray, float32
from PIL import Image
from . import np
from . import utils
_DEFAULT_DTYPE = float32
_PIL_RGB = {
'R': 0,
'G': 1,
'B': 2,
}
def get_channels(img):
'''Return a list of channels of an image array.'''
if utils.depth(img) == 1:
yield img
else:
for i in xrange(utils.depth(img)):
yield img[:, :, i]
def read(filename, dtype=_DEFAULT_DTYPE):
'''Read an image file as an array.'''
img = Image.open(filename)
arr = asarray(img, dtype=dtype)
arr = utils.swap_rgb(arr, _PIL_RGB)
return arr
def _pil_save(img, filename):
pil_img = Image.fromarray(img)
pil_img.save(filename)
return
def save(img, filename, random=False, ext=None):
'''Save an image array and return its path.'''
if random:
newfile = utils.rand_filename(filename, ext=ext)
else:
newfile = filename
np.normalize(img)
uint8img = np.to_uint8(img)
_pil_save(uint8img, newfile)
return newfile
if __name__ == '__main__':
pass
| #!/usr/bin/env python2
'''Read and write image files as NumPy arrays'''
from numpy import asarray, float32
from PIL import Image
from . import np
from . import utils
_DEFAULT_DTYPE = float32
_PIL_RGB = {
'R': 0,
'G': 1,
'B': 2,
}
def get_channels(img):
'''Return a list of channels of an image array.'''
if utils.depth(img) == 1:
yield img
else:
for i in xrange(utils.depth(img)):
yield img[:, :, i]
def read(filename, dtype=_DEFAULT_DTYPE):
'''Read an image file as an array.'''
img = Image.open(filename)
arr = asarray(img, dtype=dtype)
arr = utils.swap_rgb(arr, _PIL_RGB)
return arr
def _pil_save(img, filename):
pil_img = Image.fromarray(img)
pil_img.save(filename)
return
def save(img, filename, random=False):
'''Save an image array and return its path.'''
if random:
newfile = utils.rand_filename(filename)
else:
newfile = filename
np.normalize(img)
uint8img = np.to_uint8(img)
_pil_save(uint8img, newfile)
return newfile
if __name__ == '__main__':
pass
Add an extension parameter to the save function.#!/usr/bin/env python2
'''Read and write image files as NumPy arrays'''
from numpy import asarray, float32
from PIL import Image
from . import np
from . import utils
_DEFAULT_DTYPE = float32
_PIL_RGB = {
'R': 0,
'G': 1,
'B': 2,
}
def get_channels(img):
'''Return a list of channels of an image array.'''
if utils.depth(img) == 1:
yield img
else:
for i in xrange(utils.depth(img)):
yield img[:, :, i]
def read(filename, dtype=_DEFAULT_DTYPE):
'''Read an image file as an array.'''
img = Image.open(filename)
arr = asarray(img, dtype=dtype)
arr = utils.swap_rgb(arr, _PIL_RGB)
return arr
def _pil_save(img, filename):
pil_img = Image.fromarray(img)
pil_img.save(filename)
return
def save(img, filename, random=False, ext=None):
'''Save an image array and return its path.'''
if random:
newfile = utils.rand_filename(filename, ext=ext)
else:
newfile = filename
np.normalize(img)
uint8img = np.to_uint8(img)
_pil_save(uint8img, newfile)
return newfile
if __name__ == '__main__':
pass
| <commit_before>#!/usr/bin/env python2
'''Read and write image files as NumPy arrays'''
from numpy import asarray, float32
from PIL import Image
from . import np
from . import utils
_DEFAULT_DTYPE = float32
_PIL_RGB = {
'R': 0,
'G': 1,
'B': 2,
}
def get_channels(img):
'''Return a list of channels of an image array.'''
if utils.depth(img) == 1:
yield img
else:
for i in xrange(utils.depth(img)):
yield img[:, :, i]
def read(filename, dtype=_DEFAULT_DTYPE):
'''Read an image file as an array.'''
img = Image.open(filename)
arr = asarray(img, dtype=dtype)
arr = utils.swap_rgb(arr, _PIL_RGB)
return arr
def _pil_save(img, filename):
pil_img = Image.fromarray(img)
pil_img.save(filename)
return
def save(img, filename, random=False):
'''Save an image array and return its path.'''
if random:
newfile = utils.rand_filename(filename)
else:
newfile = filename
np.normalize(img)
uint8img = np.to_uint8(img)
_pil_save(uint8img, newfile)
return newfile
if __name__ == '__main__':
pass
<commit_msg>Add an extension parameter to the save function.<commit_after>#!/usr/bin/env python2
'''Read and write image files as NumPy arrays'''
from numpy import asarray, float32
from PIL import Image
from . import np
from . import utils
_DEFAULT_DTYPE = float32
_PIL_RGB = {
'R': 0,
'G': 1,
'B': 2,
}
def get_channels(img):
'''Return a list of channels of an image array.'''
if utils.depth(img) == 1:
yield img
else:
for i in xrange(utils.depth(img)):
yield img[:, :, i]
def read(filename, dtype=_DEFAULT_DTYPE):
'''Read an image file as an array.'''
img = Image.open(filename)
arr = asarray(img, dtype=dtype)
arr = utils.swap_rgb(arr, _PIL_RGB)
return arr
def _pil_save(img, filename):
pil_img = Image.fromarray(img)
pil_img.save(filename)
return
def save(img, filename, random=False, ext=None):
'''Save an image array and return its path.'''
if random:
newfile = utils.rand_filename(filename, ext=ext)
else:
newfile = filename
np.normalize(img)
uint8img = np.to_uint8(img)
_pil_save(uint8img, newfile)
return newfile
if __name__ == '__main__':
pass
|
719ae63a28a0fdbb379a5ee61a9bb3216ef283bc | awx/main/ha.py | awx/main/ha.py | # Copyright (c) 2014 Ansible, Inc.
# All Rights Reserved.
from django.conf import settings
from awx.main.models import Instance
def is_ha_environment():
"""Return True if this is an HA environment, and False
otherwise.
"""
# If there are two or more instances, then we are in an HA environment.
if Instance.objects.count() > 1:
return True
# If the database is not local, then we are in an HA environment.
host = settings.DATABASES['default'].get('host', 'localhost')
if host and host.lower() not in ('127.0.0.1', 'localhost'):
return True
# We are not in an HA environment.
return False
| # Copyright (c) 2014 Ansible, Inc.
# All Rights Reserved.
from django.conf import settings
from awx.main.models import Instance
def is_ha_environment():
"""Return True if this is an HA environment, and False
otherwise.
"""
# If there are two or more instances, then we are in an HA environment.
if Instance.objects.count() > 1:
return True
# If the database is not local, then we are in an HA environment.
host = settings.DATABASES['default'].get('HOST', 'localhost')
if host and host.lower() not in ('127.0.0.1', 'localhost'):
return True
# We are not in an HA environment.
return False
| Fix error causing single-host HA environments to report not HA. | Fix error causing single-host HA environments to report not HA.
| Python | apache-2.0 | snahelou/awx,snahelou/awx,snahelou/awx,wwitzel3/awx,wwitzel3/awx,snahelou/awx,wwitzel3/awx,wwitzel3/awx | # Copyright (c) 2014 Ansible, Inc.
# All Rights Reserved.
from django.conf import settings
from awx.main.models import Instance
def is_ha_environment():
"""Return True if this is an HA environment, and False
otherwise.
"""
# If there are two or more instances, then we are in an HA environment.
if Instance.objects.count() > 1:
return True
# If the database is not local, then we are in an HA environment.
host = settings.DATABASES['default'].get('host', 'localhost')
if host and host.lower() not in ('127.0.0.1', 'localhost'):
return True
# We are not in an HA environment.
return False
Fix error causing single-host HA environments to report not HA. | # Copyright (c) 2014 Ansible, Inc.
# All Rights Reserved.
from django.conf import settings
from awx.main.models import Instance
def is_ha_environment():
"""Return True if this is an HA environment, and False
otherwise.
"""
# If there are two or more instances, then we are in an HA environment.
if Instance.objects.count() > 1:
return True
# If the database is not local, then we are in an HA environment.
host = settings.DATABASES['default'].get('HOST', 'localhost')
if host and host.lower() not in ('127.0.0.1', 'localhost'):
return True
# We are not in an HA environment.
return False
| <commit_before># Copyright (c) 2014 Ansible, Inc.
# All Rights Reserved.
from django.conf import settings
from awx.main.models import Instance
def is_ha_environment():
"""Return True if this is an HA environment, and False
otherwise.
"""
# If there are two or more instances, then we are in an HA environment.
if Instance.objects.count() > 1:
return True
# If the database is not local, then we are in an HA environment.
host = settings.DATABASES['default'].get('host', 'localhost')
if host and host.lower() not in ('127.0.0.1', 'localhost'):
return True
# We are not in an HA environment.
return False
<commit_msg>Fix error causing single-host HA environments to report not HA.<commit_after> | # Copyright (c) 2014 Ansible, Inc.
# All Rights Reserved.
from django.conf import settings
from awx.main.models import Instance
def is_ha_environment():
"""Return True if this is an HA environment, and False
otherwise.
"""
# If there are two or more instances, then we are in an HA environment.
if Instance.objects.count() > 1:
return True
# If the database is not local, then we are in an HA environment.
host = settings.DATABASES['default'].get('HOST', 'localhost')
if host and host.lower() not in ('127.0.0.1', 'localhost'):
return True
# We are not in an HA environment.
return False
| # Copyright (c) 2014 Ansible, Inc.
# All Rights Reserved.
from django.conf import settings
from awx.main.models import Instance
def is_ha_environment():
"""Return True if this is an HA environment, and False
otherwise.
"""
# If there are two or more instances, then we are in an HA environment.
if Instance.objects.count() > 1:
return True
# If the database is not local, then we are in an HA environment.
host = settings.DATABASES['default'].get('host', 'localhost')
if host and host.lower() not in ('127.0.0.1', 'localhost'):
return True
# We are not in an HA environment.
return False
Fix error causing single-host HA environments to report not HA.# Copyright (c) 2014 Ansible, Inc.
# All Rights Reserved.
from django.conf import settings
from awx.main.models import Instance
def is_ha_environment():
"""Return True if this is an HA environment, and False
otherwise.
"""
# If there are two or more instances, then we are in an HA environment.
if Instance.objects.count() > 1:
return True
# If the database is not local, then we are in an HA environment.
host = settings.DATABASES['default'].get('HOST', 'localhost')
if host and host.lower() not in ('127.0.0.1', 'localhost'):
return True
# We are not in an HA environment.
return False
| <commit_before># Copyright (c) 2014 Ansible, Inc.
# All Rights Reserved.
from django.conf import settings
from awx.main.models import Instance
def is_ha_environment():
"""Return True if this is an HA environment, and False
otherwise.
"""
# If there are two or more instances, then we are in an HA environment.
if Instance.objects.count() > 1:
return True
# If the database is not local, then we are in an HA environment.
host = settings.DATABASES['default'].get('host', 'localhost')
if host and host.lower() not in ('127.0.0.1', 'localhost'):
return True
# We are not in an HA environment.
return False
<commit_msg>Fix error causing single-host HA environments to report not HA.<commit_after># Copyright (c) 2014 Ansible, Inc.
# All Rights Reserved.
from django.conf import settings
from awx.main.models import Instance
def is_ha_environment():
"""Return True if this is an HA environment, and False
otherwise.
"""
# If there are two or more instances, then we are in an HA environment.
if Instance.objects.count() > 1:
return True
# If the database is not local, then we are in an HA environment.
host = settings.DATABASES['default'].get('HOST', 'localhost')
if host and host.lower() not in ('127.0.0.1', 'localhost'):
return True
# We are not in an HA environment.
return False
|
c0e77cca647232810299c251ecfc96c0c1e4e12e | funfactory/monkeypatches.py | funfactory/monkeypatches.py | import logging
from django.conf import settings
__all__ = ['patch']
# Idempotence! http://en.wikipedia.org/wiki/Idempotence
_has_patched = False
def patch():
global _has_patched
if _has_patched:
return
# Import for side-effect: configures logging handlers.
# pylint: disable-msg=W0611
import log_settings # noqa
# Monkey-patch django forms to avoid having to use Jinja2's |safe
# everywhere.
import safe_django_forms
safe_django_forms.monkeypatch()
# Monkey-patch Django's csrf_protect decorator to use session-based CSRF
# tokens:
if 'session_csrf' in settings.INSTALLED_APPS:
import session_csrf
session_csrf.monkeypatch()
from . import admin
admin.monkeypatch()
if 'compressor' in settings.INSTALLED_APPS:
import jingo
from compressor.contrib.jinja2ext import CompressorExtension
jingo.env.add_extension(CompressorExtension)
logging.debug("Note: funfactory monkey patches executed in %s" % __file__)
# prevent it from being run again later
_has_patched = True
| import logging
from django.conf import settings
__all__ = ['patch']
# Idempotence! http://en.wikipedia.org/wiki/Idempotence
_has_patched = False
def patch():
global _has_patched
if _has_patched:
return
# Import for side-effect: configures logging handlers.
# pylint: disable-msg=W0611
import log_settings # noqa
# Monkey-patch django forms to avoid having to use Jinja2's |safe
# everywhere.
try:
import jingo.monkey
jingo.monkey.patch()
except ImportError:
# If we can't import jingo.monkey, then it's an older jingo,
# so we go back to the old ways.
import safe_django_forms
safe_django_forms.monkeypatch()
# Monkey-patch Django's csrf_protect decorator to use session-based CSRF
# tokens:
if 'session_csrf' in settings.INSTALLED_APPS:
import session_csrf
session_csrf.monkeypatch()
from . import admin
admin.monkeypatch()
if 'compressor' in settings.INSTALLED_APPS:
import jingo
from compressor.contrib.jinja2ext import CompressorExtension
jingo.env.add_extension(CompressorExtension)
logging.debug("Note: funfactory monkey patches executed in %s" % __file__)
# prevent it from being run again later
_has_patched = True
| Use jingo.monkey if it exists | Use jingo.monkey if it exists
This changes funfactory to use jingo.monkey which monkey patches Django
form fields so you don't have to do |safe all the time.
If the available jingo isn't recent enough, then it defaults to using
nuggets safe_django_forms which don't appear to work with Django 1.5
or later.
| Python | bsd-3-clause | mozilla/funfactory,mozilla/funfactory | import logging
from django.conf import settings
__all__ = ['patch']
# Idempotence! http://en.wikipedia.org/wiki/Idempotence
_has_patched = False
def patch():
global _has_patched
if _has_patched:
return
# Import for side-effect: configures logging handlers.
# pylint: disable-msg=W0611
import log_settings # noqa
# Monkey-patch django forms to avoid having to use Jinja2's |safe
# everywhere.
import safe_django_forms
safe_django_forms.monkeypatch()
# Monkey-patch Django's csrf_protect decorator to use session-based CSRF
# tokens:
if 'session_csrf' in settings.INSTALLED_APPS:
import session_csrf
session_csrf.monkeypatch()
from . import admin
admin.monkeypatch()
if 'compressor' in settings.INSTALLED_APPS:
import jingo
from compressor.contrib.jinja2ext import CompressorExtension
jingo.env.add_extension(CompressorExtension)
logging.debug("Note: funfactory monkey patches executed in %s" % __file__)
# prevent it from being run again later
_has_patched = True
Use jingo.monkey if it exists
This changes funfactory to use jingo.monkey which monkey patches Django
form fields so you don't have to do |safe all the time.
If the available jingo isn't recent enough, then it defaults to using
nuggets safe_django_forms which don't appear to work with Django 1.5
or later. | import logging
from django.conf import settings
__all__ = ['patch']
# Idempotence! http://en.wikipedia.org/wiki/Idempotence
_has_patched = False
def patch():
global _has_patched
if _has_patched:
return
# Import for side-effect: configures logging handlers.
# pylint: disable-msg=W0611
import log_settings # noqa
# Monkey-patch django forms to avoid having to use Jinja2's |safe
# everywhere.
try:
import jingo.monkey
jingo.monkey.patch()
except ImportError:
# If we can't import jingo.monkey, then it's an older jingo,
# so we go back to the old ways.
import safe_django_forms
safe_django_forms.monkeypatch()
# Monkey-patch Django's csrf_protect decorator to use session-based CSRF
# tokens:
if 'session_csrf' in settings.INSTALLED_APPS:
import session_csrf
session_csrf.monkeypatch()
from . import admin
admin.monkeypatch()
if 'compressor' in settings.INSTALLED_APPS:
import jingo
from compressor.contrib.jinja2ext import CompressorExtension
jingo.env.add_extension(CompressorExtension)
logging.debug("Note: funfactory monkey patches executed in %s" % __file__)
# prevent it from being run again later
_has_patched = True
| <commit_before>import logging
from django.conf import settings
__all__ = ['patch']
# Idempotence! http://en.wikipedia.org/wiki/Idempotence
_has_patched = False
def patch():
global _has_patched
if _has_patched:
return
# Import for side-effect: configures logging handlers.
# pylint: disable-msg=W0611
import log_settings # noqa
# Monkey-patch django forms to avoid having to use Jinja2's |safe
# everywhere.
import safe_django_forms
safe_django_forms.monkeypatch()
# Monkey-patch Django's csrf_protect decorator to use session-based CSRF
# tokens:
if 'session_csrf' in settings.INSTALLED_APPS:
import session_csrf
session_csrf.monkeypatch()
from . import admin
admin.monkeypatch()
if 'compressor' in settings.INSTALLED_APPS:
import jingo
from compressor.contrib.jinja2ext import CompressorExtension
jingo.env.add_extension(CompressorExtension)
logging.debug("Note: funfactory monkey patches executed in %s" % __file__)
# prevent it from being run again later
_has_patched = True
<commit_msg>Use jingo.monkey if it exists
This changes funfactory to use jingo.monkey which monkey patches Django
form fields so you don't have to do |safe all the time.
If the available jingo isn't recent enough, then it defaults to using
nuggets safe_django_forms which don't appear to work with Django 1.5
or later.<commit_after> | import logging
from django.conf import settings
__all__ = ['patch']
# Idempotence! http://en.wikipedia.org/wiki/Idempotence
_has_patched = False
def patch():
global _has_patched
if _has_patched:
return
# Import for side-effect: configures logging handlers.
# pylint: disable-msg=W0611
import log_settings # noqa
# Monkey-patch django forms to avoid having to use Jinja2's |safe
# everywhere.
try:
import jingo.monkey
jingo.monkey.patch()
except ImportError:
# If we can't import jingo.monkey, then it's an older jingo,
# so we go back to the old ways.
import safe_django_forms
safe_django_forms.monkeypatch()
# Monkey-patch Django's csrf_protect decorator to use session-based CSRF
# tokens:
if 'session_csrf' in settings.INSTALLED_APPS:
import session_csrf
session_csrf.monkeypatch()
from . import admin
admin.monkeypatch()
if 'compressor' in settings.INSTALLED_APPS:
import jingo
from compressor.contrib.jinja2ext import CompressorExtension
jingo.env.add_extension(CompressorExtension)
logging.debug("Note: funfactory monkey patches executed in %s" % __file__)
# prevent it from being run again later
_has_patched = True
| import logging
from django.conf import settings
__all__ = ['patch']
# Idempotence! http://en.wikipedia.org/wiki/Idempotence
_has_patched = False
def patch():
global _has_patched
if _has_patched:
return
# Import for side-effect: configures logging handlers.
# pylint: disable-msg=W0611
import log_settings # noqa
# Monkey-patch django forms to avoid having to use Jinja2's |safe
# everywhere.
import safe_django_forms
safe_django_forms.monkeypatch()
# Monkey-patch Django's csrf_protect decorator to use session-based CSRF
# tokens:
if 'session_csrf' in settings.INSTALLED_APPS:
import session_csrf
session_csrf.monkeypatch()
from . import admin
admin.monkeypatch()
if 'compressor' in settings.INSTALLED_APPS:
import jingo
from compressor.contrib.jinja2ext import CompressorExtension
jingo.env.add_extension(CompressorExtension)
logging.debug("Note: funfactory monkey patches executed in %s" % __file__)
# prevent it from being run again later
_has_patched = True
Use jingo.monkey if it exists
This changes funfactory to use jingo.monkey which monkey patches Django
form fields so you don't have to do |safe all the time.
If the available jingo isn't recent enough, then it defaults to using
nuggets safe_django_forms which don't appear to work with Django 1.5
or later.import logging
from django.conf import settings
__all__ = ['patch']
# Idempotence! http://en.wikipedia.org/wiki/Idempotence
_has_patched = False
def patch():
global _has_patched
if _has_patched:
return
# Import for side-effect: configures logging handlers.
# pylint: disable-msg=W0611
import log_settings # noqa
# Monkey-patch django forms to avoid having to use Jinja2's |safe
# everywhere.
try:
import jingo.monkey
jingo.monkey.patch()
except ImportError:
# If we can't import jingo.monkey, then it's an older jingo,
# so we go back to the old ways.
import safe_django_forms
safe_django_forms.monkeypatch()
# Monkey-patch Django's csrf_protect decorator to use session-based CSRF
# tokens:
if 'session_csrf' in settings.INSTALLED_APPS:
import session_csrf
session_csrf.monkeypatch()
from . import admin
admin.monkeypatch()
if 'compressor' in settings.INSTALLED_APPS:
import jingo
from compressor.contrib.jinja2ext import CompressorExtension
jingo.env.add_extension(CompressorExtension)
logging.debug("Note: funfactory monkey patches executed in %s" % __file__)
# prevent it from being run again later
_has_patched = True
| <commit_before>import logging
from django.conf import settings
__all__ = ['patch']
# Idempotence! http://en.wikipedia.org/wiki/Idempotence
_has_patched = False
def patch():
global _has_patched
if _has_patched:
return
# Import for side-effect: configures logging handlers.
# pylint: disable-msg=W0611
import log_settings # noqa
# Monkey-patch django forms to avoid having to use Jinja2's |safe
# everywhere.
import safe_django_forms
safe_django_forms.monkeypatch()
# Monkey-patch Django's csrf_protect decorator to use session-based CSRF
# tokens:
if 'session_csrf' in settings.INSTALLED_APPS:
import session_csrf
session_csrf.monkeypatch()
from . import admin
admin.monkeypatch()
if 'compressor' in settings.INSTALLED_APPS:
import jingo
from compressor.contrib.jinja2ext import CompressorExtension
jingo.env.add_extension(CompressorExtension)
logging.debug("Note: funfactory monkey patches executed in %s" % __file__)
# prevent it from being run again later
_has_patched = True
<commit_msg>Use jingo.monkey if it exists
This changes funfactory to use jingo.monkey which monkey patches Django
form fields so you don't have to do |safe all the time.
If the available jingo isn't recent enough, then it defaults to using
nuggets safe_django_forms which don't appear to work with Django 1.5
or later.<commit_after>import logging
from django.conf import settings
__all__ = ['patch']
# Idempotence! http://en.wikipedia.org/wiki/Idempotence
_has_patched = False
def patch():
global _has_patched
if _has_patched:
return
# Import for side-effect: configures logging handlers.
# pylint: disable-msg=W0611
import log_settings # noqa
# Monkey-patch django forms to avoid having to use Jinja2's |safe
# everywhere.
try:
import jingo.monkey
jingo.monkey.patch()
except ImportError:
# If we can't import jingo.monkey, then it's an older jingo,
# so we go back to the old ways.
import safe_django_forms
safe_django_forms.monkeypatch()
# Monkey-patch Django's csrf_protect decorator to use session-based CSRF
# tokens:
if 'session_csrf' in settings.INSTALLED_APPS:
import session_csrf
session_csrf.monkeypatch()
from . import admin
admin.monkeypatch()
if 'compressor' in settings.INSTALLED_APPS:
import jingo
from compressor.contrib.jinja2ext import CompressorExtension
jingo.env.add_extension(CompressorExtension)
logging.debug("Note: funfactory monkey patches executed in %s" % __file__)
# prevent it from being run again later
_has_patched = True
|
0501eaca73638ae9c19cd7db2946e28b0a40153e | glaciercmd/cli.py | glaciercmd/cli.py | import logging
import sys
from glaciercmd.gcconfig import GCConfig
def run():
logging.basicConfig(format="%(asctime)s %(levelname)s %(module)s: %(message)s", level=logging.DEBUG)
config = GCConfig()
if config.has_errors():
config.log_errors()
sys.exit(1)
if __name__ == '__main__':
run()
| import logging
import sys
import argparse
import glob
import os
from glaciercmd.gcconfig import GCConfig
def load_commands():
commands = []
command_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'command_*.py')
command_files = glob.glob(command_dir)
for command_file in command_files:
command = __import__("glaciercmd.{0}".format(os.path.splitext(os.path.basename(command_file))[0]), globals(), locals(), ['command_init'])
commands.append(command.command_init())
return commands
def run():
logging.basicConfig(format="%(asctime)s %(levelname)s %(module)s: %(message)s", level=logging.DEBUG)
commands = load_commands()
config = GCConfig()
if config.has_errors():
config.log_errors()
sys.exit(1)
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('args', nargs='*')
parsed_args = parser.parse_args()
found_command = False
for command in commands:
if command.accept(parsed_args.args):
command.execute(parsed_args.args, config)
found_command = True
if not found_command:
logging.error('No valid command found.');
if __name__ == '__main__':
run()
| Use command to list vaults | Use command to list vaults
| Python | mit | carsonmcdonald/glacier-cmd | import logging
import sys
from glaciercmd.gcconfig import GCConfig
def run():
logging.basicConfig(format="%(asctime)s %(levelname)s %(module)s: %(message)s", level=logging.DEBUG)
config = GCConfig()
if config.has_errors():
config.log_errors()
sys.exit(1)
if __name__ == '__main__':
run()
Use command to list vaults | import logging
import sys
import argparse
import glob
import os
from glaciercmd.gcconfig import GCConfig
def load_commands():
commands = []
command_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'command_*.py')
command_files = glob.glob(command_dir)
for command_file in command_files:
command = __import__("glaciercmd.{0}".format(os.path.splitext(os.path.basename(command_file))[0]), globals(), locals(), ['command_init'])
commands.append(command.command_init())
return commands
def run():
logging.basicConfig(format="%(asctime)s %(levelname)s %(module)s: %(message)s", level=logging.DEBUG)
commands = load_commands()
config = GCConfig()
if config.has_errors():
config.log_errors()
sys.exit(1)
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('args', nargs='*')
parsed_args = parser.parse_args()
found_command = False
for command in commands:
if command.accept(parsed_args.args):
command.execute(parsed_args.args, config)
found_command = True
if not found_command:
logging.error('No valid command found.');
if __name__ == '__main__':
run()
| <commit_before>import logging
import sys
from glaciercmd.gcconfig import GCConfig
def run():
logging.basicConfig(format="%(asctime)s %(levelname)s %(module)s: %(message)s", level=logging.DEBUG)
config = GCConfig()
if config.has_errors():
config.log_errors()
sys.exit(1)
if __name__ == '__main__':
run()
<commit_msg>Use command to list vaults<commit_after> | import logging
import sys
import argparse
import glob
import os
from glaciercmd.gcconfig import GCConfig
def load_commands():
commands = []
command_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'command_*.py')
command_files = glob.glob(command_dir)
for command_file in command_files:
command = __import__("glaciercmd.{0}".format(os.path.splitext(os.path.basename(command_file))[0]), globals(), locals(), ['command_init'])
commands.append(command.command_init())
return commands
def run():
logging.basicConfig(format="%(asctime)s %(levelname)s %(module)s: %(message)s", level=logging.DEBUG)
commands = load_commands()
config = GCConfig()
if config.has_errors():
config.log_errors()
sys.exit(1)
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('args', nargs='*')
parsed_args = parser.parse_args()
found_command = False
for command in commands:
if command.accept(parsed_args.args):
command.execute(parsed_args.args, config)
found_command = True
if not found_command:
logging.error('No valid command found.');
if __name__ == '__main__':
run()
| import logging
import sys
from glaciercmd.gcconfig import GCConfig
def run():
logging.basicConfig(format="%(asctime)s %(levelname)s %(module)s: %(message)s", level=logging.DEBUG)
config = GCConfig()
if config.has_errors():
config.log_errors()
sys.exit(1)
if __name__ == '__main__':
run()
Use command to list vaultsimport logging
import sys
import argparse
import glob
import os
from glaciercmd.gcconfig import GCConfig
def load_commands():
commands = []
command_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'command_*.py')
command_files = glob.glob(command_dir)
for command_file in command_files:
command = __import__("glaciercmd.{0}".format(os.path.splitext(os.path.basename(command_file))[0]), globals(), locals(), ['command_init'])
commands.append(command.command_init())
return commands
def run():
logging.basicConfig(format="%(asctime)s %(levelname)s %(module)s: %(message)s", level=logging.DEBUG)
commands = load_commands()
config = GCConfig()
if config.has_errors():
config.log_errors()
sys.exit(1)
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('args', nargs='*')
parsed_args = parser.parse_args()
found_command = False
for command in commands:
if command.accept(parsed_args.args):
command.execute(parsed_args.args, config)
found_command = True
if not found_command:
logging.error('No valid command found.');
if __name__ == '__main__':
run()
| <commit_before>import logging
import sys
from glaciercmd.gcconfig import GCConfig
def run():
logging.basicConfig(format="%(asctime)s %(levelname)s %(module)s: %(message)s", level=logging.DEBUG)
config = GCConfig()
if config.has_errors():
config.log_errors()
sys.exit(1)
if __name__ == '__main__':
run()
<commit_msg>Use command to list vaults<commit_after>import logging
import sys
import argparse
import glob
import os
from glaciercmd.gcconfig import GCConfig
def load_commands():
commands = []
command_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'command_*.py')
command_files = glob.glob(command_dir)
for command_file in command_files:
command = __import__("glaciercmd.{0}".format(os.path.splitext(os.path.basename(command_file))[0]), globals(), locals(), ['command_init'])
commands.append(command.command_init())
return commands
def run():
logging.basicConfig(format="%(asctime)s %(levelname)s %(module)s: %(message)s", level=logging.DEBUG)
commands = load_commands()
config = GCConfig()
if config.has_errors():
config.log_errors()
sys.exit(1)
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('args', nargs='*')
parsed_args = parser.parse_args()
found_command = False
for command in commands:
if command.accept(parsed_args.args):
command.execute(parsed_args.args, config)
found_command = True
if not found_command:
logging.error('No valid command found.');
if __name__ == '__main__':
run()
|
8810bd03df781e7ec20fcc2d0fcc7cbf423e9cdc | conda_kapsel/internal/py2_compat.py | conda_kapsel/internal/py2_compat.py | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © 2016, Continuum Analytics, Inc. All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
import platform
import sys
_PY2 = sys.version_info[0] == 2
def is_string(s):
if _PY2: # pragma: no cover (py2/py3)
return isinstance(s, basestring) # pragma: no cover (py2/py3) # noqa
else: # pragma: no cover (py2/py3)
return isinstance(s, str) # pragma: no cover (py2/py3)
def env_without_unicode(environ):
# On Windows / Python 2.7, Popen explodes if given unicode strings in the environment.
if _PY2 and platform.system() == 'Windows': # pragma: no cover (py2/py3)
environ_copy = dict()
for key, value in environ.items():
if isinstance(value, unicode): # noqa
environ_copy[key] = value.encode()
else:
environ_copy[key] = value
return environ_copy
else: # pragma: no cover (py2/py3)
return environ
| # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © 2016, Continuum Analytics, Inc. All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
import platform
import sys
_PY2 = sys.version_info[0] == 2
def is_string(s):
if _PY2: # pragma: no cover (py2/py3)
return isinstance(s, basestring) # pragma: no cover (py2/py3) # noqa
else: # pragma: no cover (py2/py3)
return isinstance(s, str) # pragma: no cover (py2/py3)
def env_without_unicode(environ):
# On Windows / Python 2.7, Popen explodes if given unicode strings in the environment.
if _PY2 and platform.system() == 'Windows': # pragma: no cover (py2/py3)
environ_copy = dict()
for key, value in environ.items():
assert isinstance(key, basestring) # noqa
assert isinstance(key, str)
if isinstance(value, unicode): # noqa
environ_copy[key] = value.encode()
assert isinstance(environ_copy[key], str)
elif not isinstance(value, str):
raise TypeError("Environment contains non-unicode non-str value %r" % value)
else:
environ_copy[key] = value
return environ_copy
else: # pragma: no cover (py2/py3)
return environ
| Add more assertions about types in environment | Add more assertions about types in environment
Trying to figure out why things are still failing on windows/py27
| Python | bsd-3-clause | conda/kapsel,conda/kapsel | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © 2016, Continuum Analytics, Inc. All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
import platform
import sys
_PY2 = sys.version_info[0] == 2
def is_string(s):
if _PY2: # pragma: no cover (py2/py3)
return isinstance(s, basestring) # pragma: no cover (py2/py3) # noqa
else: # pragma: no cover (py2/py3)
return isinstance(s, str) # pragma: no cover (py2/py3)
def env_without_unicode(environ):
# On Windows / Python 2.7, Popen explodes if given unicode strings in the environment.
if _PY2 and platform.system() == 'Windows': # pragma: no cover (py2/py3)
environ_copy = dict()
for key, value in environ.items():
if isinstance(value, unicode): # noqa
environ_copy[key] = value.encode()
else:
environ_copy[key] = value
return environ_copy
else: # pragma: no cover (py2/py3)
return environ
Add more assertions about types in environment
Trying to figure out why things are still failing on windows/py27 | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © 2016, Continuum Analytics, Inc. All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
import platform
import sys
_PY2 = sys.version_info[0] == 2
def is_string(s):
if _PY2: # pragma: no cover (py2/py3)
return isinstance(s, basestring) # pragma: no cover (py2/py3) # noqa
else: # pragma: no cover (py2/py3)
return isinstance(s, str) # pragma: no cover (py2/py3)
def env_without_unicode(environ):
# On Windows / Python 2.7, Popen explodes if given unicode strings in the environment.
if _PY2 and platform.system() == 'Windows': # pragma: no cover (py2/py3)
environ_copy = dict()
for key, value in environ.items():
assert isinstance(key, basestring) # noqa
assert isinstance(key, str)
if isinstance(value, unicode): # noqa
environ_copy[key] = value.encode()
assert isinstance(environ_copy[key], str)
elif not isinstance(value, str):
raise TypeError("Environment contains non-unicode non-str value %r" % value)
else:
environ_copy[key] = value
return environ_copy
else: # pragma: no cover (py2/py3)
return environ
| <commit_before># -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © 2016, Continuum Analytics, Inc. All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
import platform
import sys
_PY2 = sys.version_info[0] == 2
def is_string(s):
if _PY2: # pragma: no cover (py2/py3)
return isinstance(s, basestring) # pragma: no cover (py2/py3) # noqa
else: # pragma: no cover (py2/py3)
return isinstance(s, str) # pragma: no cover (py2/py3)
def env_without_unicode(environ):
# On Windows / Python 2.7, Popen explodes if given unicode strings in the environment.
if _PY2 and platform.system() == 'Windows': # pragma: no cover (py2/py3)
environ_copy = dict()
for key, value in environ.items():
if isinstance(value, unicode): # noqa
environ_copy[key] = value.encode()
else:
environ_copy[key] = value
return environ_copy
else: # pragma: no cover (py2/py3)
return environ
<commit_msg>Add more assertions about types in environment
Trying to figure out why things are still failing on windows/py27<commit_after> | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © 2016, Continuum Analytics, Inc. All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
import platform
import sys
_PY2 = sys.version_info[0] == 2
def is_string(s):
if _PY2: # pragma: no cover (py2/py3)
return isinstance(s, basestring) # pragma: no cover (py2/py3) # noqa
else: # pragma: no cover (py2/py3)
return isinstance(s, str) # pragma: no cover (py2/py3)
def env_without_unicode(environ):
# On Windows / Python 2.7, Popen explodes if given unicode strings in the environment.
if _PY2 and platform.system() == 'Windows': # pragma: no cover (py2/py3)
environ_copy = dict()
for key, value in environ.items():
assert isinstance(key, basestring) # noqa
assert isinstance(key, str)
if isinstance(value, unicode): # noqa
environ_copy[key] = value.encode()
assert isinstance(environ_copy[key], str)
elif not isinstance(value, str):
raise TypeError("Environment contains non-unicode non-str value %r" % value)
else:
environ_copy[key] = value
return environ_copy
else: # pragma: no cover (py2/py3)
return environ
| # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © 2016, Continuum Analytics, Inc. All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
import platform
import sys
_PY2 = sys.version_info[0] == 2
def is_string(s):
if _PY2: # pragma: no cover (py2/py3)
return isinstance(s, basestring) # pragma: no cover (py2/py3) # noqa
else: # pragma: no cover (py2/py3)
return isinstance(s, str) # pragma: no cover (py2/py3)
def env_without_unicode(environ):
# On Windows / Python 2.7, Popen explodes if given unicode strings in the environment.
if _PY2 and platform.system() == 'Windows': # pragma: no cover (py2/py3)
environ_copy = dict()
for key, value in environ.items():
if isinstance(value, unicode): # noqa
environ_copy[key] = value.encode()
else:
environ_copy[key] = value
return environ_copy
else: # pragma: no cover (py2/py3)
return environ
Add more assertions about types in environment
Trying to figure out why things are still failing on windows/py27# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © 2016, Continuum Analytics, Inc. All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
import platform
import sys
_PY2 = sys.version_info[0] == 2
def is_string(s):
if _PY2: # pragma: no cover (py2/py3)
return isinstance(s, basestring) # pragma: no cover (py2/py3) # noqa
else: # pragma: no cover (py2/py3)
return isinstance(s, str) # pragma: no cover (py2/py3)
def env_without_unicode(environ):
# On Windows / Python 2.7, Popen explodes if given unicode strings in the environment.
if _PY2 and platform.system() == 'Windows': # pragma: no cover (py2/py3)
environ_copy = dict()
for key, value in environ.items():
assert isinstance(key, basestring) # noqa
assert isinstance(key, str)
if isinstance(value, unicode): # noqa
environ_copy[key] = value.encode()
assert isinstance(environ_copy[key], str)
elif not isinstance(value, str):
raise TypeError("Environment contains non-unicode non-str value %r" % value)
else:
environ_copy[key] = value
return environ_copy
else: # pragma: no cover (py2/py3)
return environ
| <commit_before># -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © 2016, Continuum Analytics, Inc. All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
import platform
import sys
_PY2 = sys.version_info[0] == 2
def is_string(s):
if _PY2: # pragma: no cover (py2/py3)
return isinstance(s, basestring) # pragma: no cover (py2/py3) # noqa
else: # pragma: no cover (py2/py3)
return isinstance(s, str) # pragma: no cover (py2/py3)
def env_without_unicode(environ):
# On Windows / Python 2.7, Popen explodes if given unicode strings in the environment.
if _PY2 and platform.system() == 'Windows': # pragma: no cover (py2/py3)
environ_copy = dict()
for key, value in environ.items():
if isinstance(value, unicode): # noqa
environ_copy[key] = value.encode()
else:
environ_copy[key] = value
return environ_copy
else: # pragma: no cover (py2/py3)
return environ
<commit_msg>Add more assertions about types in environment
Trying to figure out why things are still failing on windows/py27<commit_after># -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © 2016, Continuum Analytics, Inc. All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
import platform
import sys
_PY2 = sys.version_info[0] == 2
def is_string(s):
if _PY2: # pragma: no cover (py2/py3)
return isinstance(s, basestring) # pragma: no cover (py2/py3) # noqa
else: # pragma: no cover (py2/py3)
return isinstance(s, str) # pragma: no cover (py2/py3)
def env_without_unicode(environ):
# On Windows / Python 2.7, Popen explodes if given unicode strings in the environment.
if _PY2 and platform.system() == 'Windows': # pragma: no cover (py2/py3)
environ_copy = dict()
for key, value in environ.items():
assert isinstance(key, basestring) # noqa
assert isinstance(key, str)
if isinstance(value, unicode): # noqa
environ_copy[key] = value.encode()
assert isinstance(environ_copy[key], str)
elif not isinstance(value, str):
raise TypeError("Environment contains non-unicode non-str value %r" % value)
else:
environ_copy[key] = value
return environ_copy
else: # pragma: no cover (py2/py3)
return environ
|
2c3ddc18477561f4880c2b857c4aa8a0f8478dfd | src/psycholinguistic_db/psycholinguistic_db_creator.py | src/psycholinguistic_db/psycholinguistic_db_creator.py | #!/usr/bin/env python
# Creates a CSV of psycholinguistic dictionary
# downloaded from web
# Headers
__author__ = 'Somsubhra Bairi'
__email__ = 'somsubhra.bairi@gmail.com'
# All imports
from logger import Logger
# The psycholinguistic database creator
class PsycholinguisticDbCreator:
# Constructor for the database creator
def __init__(self, in_file, out_file):
self.in_file = in_file
self.out_file = out_file
# Create the database
def create(self):
Logger.log_message('Creating psycholinguistic dictionary database')
input_file = open(self.in_file, 'r')
output_file = open(self.out_file, 'w')
for line in input_file.readlines():
output_file.write(';'.join(word.lower() for word in line.split()) + '\n')
input_file.close()
output_file.close()
Logger.log_success('Created psycholinguistic dictionary database') | #!/usr/bin/env python
# Creates a CSV of psycholinguistic dictionary
# downloaded from web
# Headers
__author__ = 'Somsubhra Bairi'
__email__ = 'somsubhra.bairi@gmail.com'
# All imports
from logger import Logger
from nltk import PorterStemmer
# The psycholinguistic database creator
class PsycholinguisticDbCreator:
# Constructor for the database creator
def __init__(self, in_file, out_file):
self.in_file = in_file
self.out_file = out_file
self.kf_frequencies = {}
self.syllables = {}
# Create the database
def create(self):
Logger.log_message('Creating psycholinguistic dictionary database')
input_file = open(self.in_file, 'r')
output_file = open(self.out_file, 'w')
for line in input_file.readlines():
items = line.split()
word = PorterStemmer().stem_word(items[2].lower())
kff = items[1]
syl = items[0]
if word in self.kf_frequencies:
# Select the stemmed word with the maximum KF Frequency
if kff > self.kf_frequencies[word]:
self.kf_frequencies[word] = kff
else:
self.kf_frequencies[word] = kff
if word in self.syllables:
# Select the stemmed word with minimum number of syllables
if syl < self.syllables[word]:
self.syllables[word] = syl
else:
self.syllables[word] = syl
# Dump the contents to the output file
for word in self.kf_frequencies:
output_file.write(word + ";" + self.kf_frequencies[word] + ";" + self.syllables[word] + "\n")
input_file.close()
output_file.close()
Logger.log_success('Created psycholinguistic dictionary database') | Create the psycholinguistic_db according to our needs | Create the psycholinguistic_db according to our needs
| Python | mit | Somsubhra/Enrich,Somsubhra/Enrich,Somsubhra/Enrich | #!/usr/bin/env python
# Creates a CSV of psycholinguistic dictionary
# downloaded from web
# Headers
__author__ = 'Somsubhra Bairi'
__email__ = 'somsubhra.bairi@gmail.com'
# All imports
from logger import Logger
# The psycholinguistic database creator
class PsycholinguisticDbCreator:
# Constructor for the database creator
def __init__(self, in_file, out_file):
self.in_file = in_file
self.out_file = out_file
# Create the database
def create(self):
Logger.log_message('Creating psycholinguistic dictionary database')
input_file = open(self.in_file, 'r')
output_file = open(self.out_file, 'w')
for line in input_file.readlines():
output_file.write(';'.join(word.lower() for word in line.split()) + '\n')
input_file.close()
output_file.close()
Logger.log_success('Created psycholinguistic dictionary database')Create the psycholinguistic_db according to our needs | #!/usr/bin/env python
# Creates a CSV of psycholinguistic dictionary
# downloaded from web
# Headers
__author__ = 'Somsubhra Bairi'
__email__ = 'somsubhra.bairi@gmail.com'
# All imports
from logger import Logger
from nltk import PorterStemmer
# The psycholinguistic database creator
class PsycholinguisticDbCreator:
# Constructor for the database creator
def __init__(self, in_file, out_file):
self.in_file = in_file
self.out_file = out_file
self.kf_frequencies = {}
self.syllables = {}
# Create the database
def create(self):
Logger.log_message('Creating psycholinguistic dictionary database')
input_file = open(self.in_file, 'r')
output_file = open(self.out_file, 'w')
for line in input_file.readlines():
items = line.split()
word = PorterStemmer().stem_word(items[2].lower())
kff = items[1]
syl = items[0]
if word in self.kf_frequencies:
# Select the stemmed word with the maximum KF Frequency
if kff > self.kf_frequencies[word]:
self.kf_frequencies[word] = kff
else:
self.kf_frequencies[word] = kff
if word in self.syllables:
# Select the stemmed word with minimum number of syllables
if syl < self.syllables[word]:
self.syllables[word] = syl
else:
self.syllables[word] = syl
# Dump the contents to the output file
for word in self.kf_frequencies:
output_file.write(word + ";" + self.kf_frequencies[word] + ";" + self.syllables[word] + "\n")
input_file.close()
output_file.close()
Logger.log_success('Created psycholinguistic dictionary database') | <commit_before>#!/usr/bin/env python
# Creates a CSV of psycholinguistic dictionary
# downloaded from web
# Headers
__author__ = 'Somsubhra Bairi'
__email__ = 'somsubhra.bairi@gmail.com'
# All imports
from logger import Logger
# The psycholinguistic database creator
class PsycholinguisticDbCreator:
# Constructor for the database creator
def __init__(self, in_file, out_file):
self.in_file = in_file
self.out_file = out_file
# Create the database
def create(self):
Logger.log_message('Creating psycholinguistic dictionary database')
input_file = open(self.in_file, 'r')
output_file = open(self.out_file, 'w')
for line in input_file.readlines():
output_file.write(';'.join(word.lower() for word in line.split()) + '\n')
input_file.close()
output_file.close()
Logger.log_success('Created psycholinguistic dictionary database')<commit_msg>Create the psycholinguistic_db according to our needs<commit_after> | #!/usr/bin/env python
# Creates a CSV of psycholinguistic dictionary
# downloaded from web
# Headers
__author__ = 'Somsubhra Bairi'
__email__ = 'somsubhra.bairi@gmail.com'
# All imports
from logger import Logger
from nltk import PorterStemmer
# The psycholinguistic database creator
class PsycholinguisticDbCreator:
# Constructor for the database creator
def __init__(self, in_file, out_file):
self.in_file = in_file
self.out_file = out_file
self.kf_frequencies = {}
self.syllables = {}
# Create the database
def create(self):
Logger.log_message('Creating psycholinguistic dictionary database')
input_file = open(self.in_file, 'r')
output_file = open(self.out_file, 'w')
for line in input_file.readlines():
items = line.split()
word = PorterStemmer().stem_word(items[2].lower())
kff = items[1]
syl = items[0]
if word in self.kf_frequencies:
# Select the stemmed word with the maximum KF Frequency
if kff > self.kf_frequencies[word]:
self.kf_frequencies[word] = kff
else:
self.kf_frequencies[word] = kff
if word in self.syllables:
# Select the stemmed word with minimum number of syllables
if syl < self.syllables[word]:
self.syllables[word] = syl
else:
self.syllables[word] = syl
# Dump the contents to the output file
for word in self.kf_frequencies:
output_file.write(word + ";" + self.kf_frequencies[word] + ";" + self.syllables[word] + "\n")
input_file.close()
output_file.close()
Logger.log_success('Created psycholinguistic dictionary database') | #!/usr/bin/env python
# Creates a CSV of psycholinguistic dictionary
# downloaded from web
# Headers
__author__ = 'Somsubhra Bairi'
__email__ = 'somsubhra.bairi@gmail.com'
# All imports
from logger import Logger
# The psycholinguistic database creator
class PsycholinguisticDbCreator:
# Constructor for the database creator
def __init__(self, in_file, out_file):
self.in_file = in_file
self.out_file = out_file
# Create the database
def create(self):
Logger.log_message('Creating psycholinguistic dictionary database')
input_file = open(self.in_file, 'r')
output_file = open(self.out_file, 'w')
for line in input_file.readlines():
output_file.write(';'.join(word.lower() for word in line.split()) + '\n')
input_file.close()
output_file.close()
Logger.log_success('Created psycholinguistic dictionary database')Create the psycholinguistic_db according to our needs#!/usr/bin/env python
# Creates a CSV of psycholinguistic dictionary
# downloaded from web
# Headers
__author__ = 'Somsubhra Bairi'
__email__ = 'somsubhra.bairi@gmail.com'
# All imports
from logger import Logger
from nltk import PorterStemmer
# The psycholinguistic database creator
class PsycholinguisticDbCreator:
# Constructor for the database creator
def __init__(self, in_file, out_file):
self.in_file = in_file
self.out_file = out_file
self.kf_frequencies = {}
self.syllables = {}
# Create the database
def create(self):
Logger.log_message('Creating psycholinguistic dictionary database')
input_file = open(self.in_file, 'r')
output_file = open(self.out_file, 'w')
for line in input_file.readlines():
items = line.split()
word = PorterStemmer().stem_word(items[2].lower())
kff = items[1]
syl = items[0]
if word in self.kf_frequencies:
# Select the stemmed word with the maximum KF Frequency
if kff > self.kf_frequencies[word]:
self.kf_frequencies[word] = kff
else:
self.kf_frequencies[word] = kff
if word in self.syllables:
# Select the stemmed word with minimum number of syllables
if syl < self.syllables[word]:
self.syllables[word] = syl
else:
self.syllables[word] = syl
# Dump the contents to the output file
for word in self.kf_frequencies:
output_file.write(word + ";" + self.kf_frequencies[word] + ";" + self.syllables[word] + "\n")
input_file.close()
output_file.close()
Logger.log_success('Created psycholinguistic dictionary database') | <commit_before>#!/usr/bin/env python
# Creates a CSV of psycholinguistic dictionary
# downloaded from web
# Headers
__author__ = 'Somsubhra Bairi'
__email__ = 'somsubhra.bairi@gmail.com'
# All imports
from logger import Logger
# The psycholinguistic database creator
class PsycholinguisticDbCreator:
# Constructor for the database creator
def __init__(self, in_file, out_file):
self.in_file = in_file
self.out_file = out_file
# Create the database
def create(self):
Logger.log_message('Creating psycholinguistic dictionary database')
input_file = open(self.in_file, 'r')
output_file = open(self.out_file, 'w')
for line in input_file.readlines():
output_file.write(';'.join(word.lower() for word in line.split()) + '\n')
input_file.close()
output_file.close()
Logger.log_success('Created psycholinguistic dictionary database')<commit_msg>Create the psycholinguistic_db according to our needs<commit_after>#!/usr/bin/env python
# Creates a CSV of psycholinguistic dictionary
# downloaded from web
# Headers
__author__ = 'Somsubhra Bairi'
__email__ = 'somsubhra.bairi@gmail.com'
# All imports
from logger import Logger
from nltk import PorterStemmer
# The psycholinguistic database creator
class PsycholinguisticDbCreator:
# Constructor for the database creator
def __init__(self, in_file, out_file):
self.in_file = in_file
self.out_file = out_file
self.kf_frequencies = {}
self.syllables = {}
# Create the database
def create(self):
Logger.log_message('Creating psycholinguistic dictionary database')
input_file = open(self.in_file, 'r')
output_file = open(self.out_file, 'w')
for line in input_file.readlines():
items = line.split()
word = PorterStemmer().stem_word(items[2].lower())
kff = items[1]
syl = items[0]
if word in self.kf_frequencies:
# Select the stemmed word with the maximum KF Frequency
if kff > self.kf_frequencies[word]:
self.kf_frequencies[word] = kff
else:
self.kf_frequencies[word] = kff
if word in self.syllables:
# Select the stemmed word with minimum number of syllables
if syl < self.syllables[word]:
self.syllables[word] = syl
else:
self.syllables[word] = syl
# Dump the contents to the output file
for word in self.kf_frequencies:
output_file.write(word + ";" + self.kf_frequencies[word] + ";" + self.syllables[word] + "\n")
input_file.close()
output_file.close()
Logger.log_success('Created psycholinguistic dictionary database') |
f55a00cfd81f8f3c88aaaa5a4b3d63ceb4364a11 | books/views.py | books/views.py | from django.shortcuts import redirect
from django.views.decorators.csrf import csrf_exempt
from .models import BookIndex, Book
@csrf_exempt
def book_index(request):
page = BookIndex.objects.all()[0]
return redirect('/api/v2/pages/{}'.format(page.pk))
@csrf_exempt
def book_detail(request, slug):
page = Book.objects.get(slug=slug)
return redirect('/api/v2/pages/{}'.format(page.pk))
| from django.shortcuts import redirect
from django.views.decorators.csrf import csrf_exempt
from .models import BookIndex, Book
@csrf_exempt
def book_index(request):
page = BookIndex.objects.all()[0]
return redirect('/api/v2/pages/{}'.format(page.pk))
@csrf_exempt
def book_detail(request, slug):
try:
page = Book.objects.get(slug=slug)
return redirect('/api/v2/pages/{}'.format(page.pk))
except Book.DoesNotExist:
#no book, return to book index
page = BookIndex.objects.all()[0]
return redirect('/api/v2/pages/{}'.format(page.pk))
| Return book index page if book not found by slug | Return book index page if book not found by slug
| Python | agpl-3.0 | openstax/openstax-cms,Connexions/openstax-cms,openstax/openstax-cms,openstax/openstax-cms,openstax/openstax-cms,Connexions/openstax-cms | from django.shortcuts import redirect
from django.views.decorators.csrf import csrf_exempt
from .models import BookIndex, Book
@csrf_exempt
def book_index(request):
page = BookIndex.objects.all()[0]
return redirect('/api/v2/pages/{}'.format(page.pk))
@csrf_exempt
def book_detail(request, slug):
page = Book.objects.get(slug=slug)
return redirect('/api/v2/pages/{}'.format(page.pk))
Return book index page if book not found by slug | from django.shortcuts import redirect
from django.views.decorators.csrf import csrf_exempt
from .models import BookIndex, Book
@csrf_exempt
def book_index(request):
page = BookIndex.objects.all()[0]
return redirect('/api/v2/pages/{}'.format(page.pk))
@csrf_exempt
def book_detail(request, slug):
try:
page = Book.objects.get(slug=slug)
return redirect('/api/v2/pages/{}'.format(page.pk))
except Book.DoesNotExist:
#no book, return to book index
page = BookIndex.objects.all()[0]
return redirect('/api/v2/pages/{}'.format(page.pk))
| <commit_before>from django.shortcuts import redirect
from django.views.decorators.csrf import csrf_exempt
from .models import BookIndex, Book
@csrf_exempt
def book_index(request):
page = BookIndex.objects.all()[0]
return redirect('/api/v2/pages/{}'.format(page.pk))
@csrf_exempt
def book_detail(request, slug):
page = Book.objects.get(slug=slug)
return redirect('/api/v2/pages/{}'.format(page.pk))
<commit_msg>Return book index page if book not found by slug<commit_after> | from django.shortcuts import redirect
from django.views.decorators.csrf import csrf_exempt
from .models import BookIndex, Book
@csrf_exempt
def book_index(request):
page = BookIndex.objects.all()[0]
return redirect('/api/v2/pages/{}'.format(page.pk))
@csrf_exempt
def book_detail(request, slug):
try:
page = Book.objects.get(slug=slug)
return redirect('/api/v2/pages/{}'.format(page.pk))
except Book.DoesNotExist:
#no book, return to book index
page = BookIndex.objects.all()[0]
return redirect('/api/v2/pages/{}'.format(page.pk))
| from django.shortcuts import redirect
from django.views.decorators.csrf import csrf_exempt
from .models import BookIndex, Book
@csrf_exempt
def book_index(request):
page = BookIndex.objects.all()[0]
return redirect('/api/v2/pages/{}'.format(page.pk))
@csrf_exempt
def book_detail(request, slug):
page = Book.objects.get(slug=slug)
return redirect('/api/v2/pages/{}'.format(page.pk))
Return book index page if book not found by slugfrom django.shortcuts import redirect
from django.views.decorators.csrf import csrf_exempt
from .models import BookIndex, Book
@csrf_exempt
def book_index(request):
page = BookIndex.objects.all()[0]
return redirect('/api/v2/pages/{}'.format(page.pk))
@csrf_exempt
def book_detail(request, slug):
try:
page = Book.objects.get(slug=slug)
return redirect('/api/v2/pages/{}'.format(page.pk))
except Book.DoesNotExist:
#no book, return to book index
page = BookIndex.objects.all()[0]
return redirect('/api/v2/pages/{}'.format(page.pk))
| <commit_before>from django.shortcuts import redirect
from django.views.decorators.csrf import csrf_exempt
from .models import BookIndex, Book
@csrf_exempt
def book_index(request):
page = BookIndex.objects.all()[0]
return redirect('/api/v2/pages/{}'.format(page.pk))
@csrf_exempt
def book_detail(request, slug):
page = Book.objects.get(slug=slug)
return redirect('/api/v2/pages/{}'.format(page.pk))
<commit_msg>Return book index page if book not found by slug<commit_after>from django.shortcuts import redirect
from django.views.decorators.csrf import csrf_exempt
from .models import BookIndex, Book
@csrf_exempt
def book_index(request):
page = BookIndex.objects.all()[0]
return redirect('/api/v2/pages/{}'.format(page.pk))
@csrf_exempt
def book_detail(request, slug):
try:
page = Book.objects.get(slug=slug)
return redirect('/api/v2/pages/{}'.format(page.pk))
except Book.DoesNotExist:
#no book, return to book index
page = BookIndex.objects.all()[0]
return redirect('/api/v2/pages/{}'.format(page.pk))
|
ba57cf57d3e075d5260fb19304ec0e9f903c3ed0 | pajbot/actions.py | pajbot/actions.py | import logging
import queue
import threading
log = logging.getLogger(__name__)
class Action:
func = None
args = []
kwargs = {}
def __init__(self, func=None, args=[], kwargs={}):
self.func = func
self.args = args
self.kwargs = kwargs
def run(self):
self.func(*self.args, **self.kwargs)
class ActionQueue:
ID = 0
def __init__(self):
self.queue = queue.Queue()
self.id = ActionQueue.ID
ActionQueue.ID += 1
# Starts a thread which will continuously check the queue for actions.
def start(self):
t = threading.Thread(target=self._action_parser, name="ActionQueueThread_{}".format(self.id))
t.daemon = True
t.start()
# Start a loop which waits and things to be added into the queue.
# Note: This is a blocking method, and should be run in a separate thread
# This method is started automatically if ActionQueue is declared threaded.
def _action_parser(self):
while True:
action = self.queue.get()
action.run()
# Run a single action in the queue if the queue is not empty.
def parse_action(self):
if not self.queue.empty():
action = self.queue.get()
action.run()
def add(self, f, args=[], kwargs={}):
action = Action(f, args, kwargs)
self._add(action)
def _add(self, action):
self.queue.put(action)
| import logging
import queue
import threading
log = logging.getLogger(__name__)
class Action:
func = None
args = []
kwargs = {}
def __init__(self, func=None, args=[], kwargs={}):
self.func = func
self.args = args
self.kwargs = kwargs
def run(self):
self.func(*self.args, **self.kwargs)
class ActionQueue:
ID = 0
def __init__(self):
self.queue = queue.Queue()
self.id = ActionQueue.ID
ActionQueue.ID += 1
# Starts a thread which will continuously check the queue for actions.
def start(self):
t = threading.Thread(target=self._action_parser, name="ActionQueueThread_{}".format(self.id))
t.daemon = True
t.start()
# Start a loop which waits and things to be added into the queue.
# Note: This is a blocking method, and should be run in a separate thread
# This method is started automatically if ActionQueue is declared threaded.
def _action_parser(self):
while True:
action = self.queue.get()
action.run()
# Run a single action in the queue if the queue is not empty.
def parse_action(self):
if not self.queue.empty():
action = self.queue.get()
action.run()
def add(self, func, args=[], kwargs={}):
action = Action(func, args, kwargs)
self._add(action)
def _add(self, action):
self.queue.put(action)
| Rename f to func in one last place (ActionQueue) | Rename f to func in one last place (ActionQueue)
| Python | mit | pajlada/tyggbot,pajlada/tyggbot,pajlada/pajbot,pajlada/pajbot,pajlada/pajbot,pajlada/pajbot,pajlada/tyggbot,pajlada/tyggbot | import logging
import queue
import threading
log = logging.getLogger(__name__)
class Action:
func = None
args = []
kwargs = {}
def __init__(self, func=None, args=[], kwargs={}):
self.func = func
self.args = args
self.kwargs = kwargs
def run(self):
self.func(*self.args, **self.kwargs)
class ActionQueue:
ID = 0
def __init__(self):
self.queue = queue.Queue()
self.id = ActionQueue.ID
ActionQueue.ID += 1
# Starts a thread which will continuously check the queue for actions.
def start(self):
t = threading.Thread(target=self._action_parser, name="ActionQueueThread_{}".format(self.id))
t.daemon = True
t.start()
# Start a loop which waits and things to be added into the queue.
# Note: This is a blocking method, and should be run in a separate thread
# This method is started automatically if ActionQueue is declared threaded.
def _action_parser(self):
while True:
action = self.queue.get()
action.run()
# Run a single action in the queue if the queue is not empty.
def parse_action(self):
if not self.queue.empty():
action = self.queue.get()
action.run()
def add(self, f, args=[], kwargs={}):
action = Action(f, args, kwargs)
self._add(action)
def _add(self, action):
self.queue.put(action)
Rename f to func in one last place (ActionQueue) | import logging
import queue
import threading
log = logging.getLogger(__name__)
class Action:
func = None
args = []
kwargs = {}
def __init__(self, func=None, args=[], kwargs={}):
self.func = func
self.args = args
self.kwargs = kwargs
def run(self):
self.func(*self.args, **self.kwargs)
class ActionQueue:
ID = 0
def __init__(self):
self.queue = queue.Queue()
self.id = ActionQueue.ID
ActionQueue.ID += 1
# Starts a thread which will continuously check the queue for actions.
def start(self):
t = threading.Thread(target=self._action_parser, name="ActionQueueThread_{}".format(self.id))
t.daemon = True
t.start()
# Start a loop which waits and things to be added into the queue.
# Note: This is a blocking method, and should be run in a separate thread
# This method is started automatically if ActionQueue is declared threaded.
def _action_parser(self):
while True:
action = self.queue.get()
action.run()
# Run a single action in the queue if the queue is not empty.
def parse_action(self):
if not self.queue.empty():
action = self.queue.get()
action.run()
def add(self, func, args=[], kwargs={}):
action = Action(func, args, kwargs)
self._add(action)
def _add(self, action):
self.queue.put(action)
| <commit_before>import logging
import queue
import threading
log = logging.getLogger(__name__)
class Action:
func = None
args = []
kwargs = {}
def __init__(self, func=None, args=[], kwargs={}):
self.func = func
self.args = args
self.kwargs = kwargs
def run(self):
self.func(*self.args, **self.kwargs)
class ActionQueue:
ID = 0
def __init__(self):
self.queue = queue.Queue()
self.id = ActionQueue.ID
ActionQueue.ID += 1
# Starts a thread which will continuously check the queue for actions.
def start(self):
t = threading.Thread(target=self._action_parser, name="ActionQueueThread_{}".format(self.id))
t.daemon = True
t.start()
# Start a loop which waits and things to be added into the queue.
# Note: This is a blocking method, and should be run in a separate thread
# This method is started automatically if ActionQueue is declared threaded.
def _action_parser(self):
while True:
action = self.queue.get()
action.run()
# Run a single action in the queue if the queue is not empty.
def parse_action(self):
if not self.queue.empty():
action = self.queue.get()
action.run()
def add(self, f, args=[], kwargs={}):
action = Action(f, args, kwargs)
self._add(action)
def _add(self, action):
self.queue.put(action)
<commit_msg>Rename f to func in one last place (ActionQueue)<commit_after> | import logging
import queue
import threading
log = logging.getLogger(__name__)
class Action:
func = None
args = []
kwargs = {}
def __init__(self, func=None, args=[], kwargs={}):
self.func = func
self.args = args
self.kwargs = kwargs
def run(self):
self.func(*self.args, **self.kwargs)
class ActionQueue:
ID = 0
def __init__(self):
self.queue = queue.Queue()
self.id = ActionQueue.ID
ActionQueue.ID += 1
# Starts a thread which will continuously check the queue for actions.
def start(self):
t = threading.Thread(target=self._action_parser, name="ActionQueueThread_{}".format(self.id))
t.daemon = True
t.start()
# Start a loop which waits and things to be added into the queue.
# Note: This is a blocking method, and should be run in a separate thread
# This method is started automatically if ActionQueue is declared threaded.
def _action_parser(self):
while True:
action = self.queue.get()
action.run()
# Run a single action in the queue if the queue is not empty.
def parse_action(self):
if not self.queue.empty():
action = self.queue.get()
action.run()
def add(self, func, args=[], kwargs={}):
action = Action(func, args, kwargs)
self._add(action)
def _add(self, action):
self.queue.put(action)
| import logging
import queue
import threading
log = logging.getLogger(__name__)
class Action:
func = None
args = []
kwargs = {}
def __init__(self, func=None, args=[], kwargs={}):
self.func = func
self.args = args
self.kwargs = kwargs
def run(self):
self.func(*self.args, **self.kwargs)
class ActionQueue:
ID = 0
def __init__(self):
self.queue = queue.Queue()
self.id = ActionQueue.ID
ActionQueue.ID += 1
# Starts a thread which will continuously check the queue for actions.
def start(self):
t = threading.Thread(target=self._action_parser, name="ActionQueueThread_{}".format(self.id))
t.daemon = True
t.start()
# Start a loop which waits and things to be added into the queue.
# Note: This is a blocking method, and should be run in a separate thread
# This method is started automatically if ActionQueue is declared threaded.
def _action_parser(self):
while True:
action = self.queue.get()
action.run()
# Run a single action in the queue if the queue is not empty.
def parse_action(self):
if not self.queue.empty():
action = self.queue.get()
action.run()
def add(self, f, args=[], kwargs={}):
action = Action(f, args, kwargs)
self._add(action)
def _add(self, action):
self.queue.put(action)
Rename f to func in one last place (ActionQueue)import logging
import queue
import threading
log = logging.getLogger(__name__)
class Action:
func = None
args = []
kwargs = {}
def __init__(self, func=None, args=[], kwargs={}):
self.func = func
self.args = args
self.kwargs = kwargs
def run(self):
self.func(*self.args, **self.kwargs)
class ActionQueue:
ID = 0
def __init__(self):
self.queue = queue.Queue()
self.id = ActionQueue.ID
ActionQueue.ID += 1
# Starts a thread which will continuously check the queue for actions.
def start(self):
t = threading.Thread(target=self._action_parser, name="ActionQueueThread_{}".format(self.id))
t.daemon = True
t.start()
# Start a loop which waits and things to be added into the queue.
# Note: This is a blocking method, and should be run in a separate thread
# This method is started automatically if ActionQueue is declared threaded.
def _action_parser(self):
while True:
action = self.queue.get()
action.run()
# Run a single action in the queue if the queue is not empty.
def parse_action(self):
if not self.queue.empty():
action = self.queue.get()
action.run()
def add(self, func, args=[], kwargs={}):
action = Action(func, args, kwargs)
self._add(action)
def _add(self, action):
self.queue.put(action)
| <commit_before>import logging
import queue
import threading
log = logging.getLogger(__name__)
class Action:
func = None
args = []
kwargs = {}
def __init__(self, func=None, args=[], kwargs={}):
self.func = func
self.args = args
self.kwargs = kwargs
def run(self):
self.func(*self.args, **self.kwargs)
class ActionQueue:
ID = 0
def __init__(self):
self.queue = queue.Queue()
self.id = ActionQueue.ID
ActionQueue.ID += 1
# Starts a thread which will continuously check the queue for actions.
def start(self):
t = threading.Thread(target=self._action_parser, name="ActionQueueThread_{}".format(self.id))
t.daemon = True
t.start()
# Start a loop which waits and things to be added into the queue.
# Note: This is a blocking method, and should be run in a separate thread
# This method is started automatically if ActionQueue is declared threaded.
def _action_parser(self):
while True:
action = self.queue.get()
action.run()
# Run a single action in the queue if the queue is not empty.
def parse_action(self):
if not self.queue.empty():
action = self.queue.get()
action.run()
def add(self, f, args=[], kwargs={}):
action = Action(f, args, kwargs)
self._add(action)
def _add(self, action):
self.queue.put(action)
<commit_msg>Rename f to func in one last place (ActionQueue)<commit_after>import logging
import queue
import threading
log = logging.getLogger(__name__)
class Action:
func = None
args = []
kwargs = {}
def __init__(self, func=None, args=[], kwargs={}):
self.func = func
self.args = args
self.kwargs = kwargs
def run(self):
self.func(*self.args, **self.kwargs)
class ActionQueue:
ID = 0
def __init__(self):
self.queue = queue.Queue()
self.id = ActionQueue.ID
ActionQueue.ID += 1
# Starts a thread which will continuously check the queue for actions.
def start(self):
t = threading.Thread(target=self._action_parser, name="ActionQueueThread_{}".format(self.id))
t.daemon = True
t.start()
# Start a loop which waits and things to be added into the queue.
# Note: This is a blocking method, and should be run in a separate thread
# This method is started automatically if ActionQueue is declared threaded.
def _action_parser(self):
while True:
action = self.queue.get()
action.run()
# Run a single action in the queue if the queue is not empty.
def parse_action(self):
if not self.queue.empty():
action = self.queue.get()
action.run()
def add(self, func, args=[], kwargs={}):
action = Action(func, args, kwargs)
self._add(action)
def _add(self, action):
self.queue.put(action)
|
e388099f6fcc0fbc1904bb24f050d7106204e58c | src/zeit/campus/article.py | src/zeit/campus/article.py | import zope.interface
import zeit.cms.content.reference
import zeit.campus.interfaces
class TopicpageLink(zeit.cms.related.related.RelatedBase):
zope.interface.implements(zeit.campus.interfaces.ITopicpageLink)
topicpagelink = zeit.cms.content.reference.SingleResource(
'.head.topicpagelink.url', 'related')
topicpagelink_label = zeit.cms.content.dav.mapProperties(
zeit.campus.interfaces.ITopicpageLink,
zeit.cms.interfaces.DOCUMENT_SCHEMA_NS,
('topicpagelink_label',))
topicpagelink_label = zeit.cms.content.property.ObjectPathProperty(
'.head.topicpagelink.label',
zeit.campus.interfaces.ITopicpageLink['topicpagelink_label'])
| import zope.interface
import zeit.cms.content.reference
import zeit.campus.interfaces
class TopicpageLink(zeit.cms.related.related.RelatedBase):
zope.interface.implements(zeit.campus.interfaces.ITopicpageLink)
topicpagelink = zeit.cms.content.reference.SingleResource(
'.head.topicpagelink', 'related')
topicpagelink_label = zeit.cms.content.dav.mapProperties(
zeit.campus.interfaces.ITopicpageLink,
zeit.cms.interfaces.DOCUMENT_SCHEMA_NS,
('topicpagelink_label',))
topicpagelink_label = zeit.cms.content.property.ObjectPathProperty(
'.head.topicpagelink.label',
zeit.campus.interfaces.ITopicpageLink['topicpagelink_label'])
| Fix xml path to topcpagelink | ZON-2838: Fix xml path to topcpagelink
| Python | bsd-3-clause | ZeitOnline/zeit.campus | import zope.interface
import zeit.cms.content.reference
import zeit.campus.interfaces
class TopicpageLink(zeit.cms.related.related.RelatedBase):
zope.interface.implements(zeit.campus.interfaces.ITopicpageLink)
topicpagelink = zeit.cms.content.reference.SingleResource(
'.head.topicpagelink.url', 'related')
topicpagelink_label = zeit.cms.content.dav.mapProperties(
zeit.campus.interfaces.ITopicpageLink,
zeit.cms.interfaces.DOCUMENT_SCHEMA_NS,
('topicpagelink_label',))
topicpagelink_label = zeit.cms.content.property.ObjectPathProperty(
'.head.topicpagelink.label',
zeit.campus.interfaces.ITopicpageLink['topicpagelink_label'])
ZON-2838: Fix xml path to topcpagelink | import zope.interface
import zeit.cms.content.reference
import zeit.campus.interfaces
class TopicpageLink(zeit.cms.related.related.RelatedBase):
zope.interface.implements(zeit.campus.interfaces.ITopicpageLink)
topicpagelink = zeit.cms.content.reference.SingleResource(
'.head.topicpagelink', 'related')
topicpagelink_label = zeit.cms.content.dav.mapProperties(
zeit.campus.interfaces.ITopicpageLink,
zeit.cms.interfaces.DOCUMENT_SCHEMA_NS,
('topicpagelink_label',))
topicpagelink_label = zeit.cms.content.property.ObjectPathProperty(
'.head.topicpagelink.label',
zeit.campus.interfaces.ITopicpageLink['topicpagelink_label'])
| <commit_before>import zope.interface
import zeit.cms.content.reference
import zeit.campus.interfaces
class TopicpageLink(zeit.cms.related.related.RelatedBase):
zope.interface.implements(zeit.campus.interfaces.ITopicpageLink)
topicpagelink = zeit.cms.content.reference.SingleResource(
'.head.topicpagelink.url', 'related')
topicpagelink_label = zeit.cms.content.dav.mapProperties(
zeit.campus.interfaces.ITopicpageLink,
zeit.cms.interfaces.DOCUMENT_SCHEMA_NS,
('topicpagelink_label',))
topicpagelink_label = zeit.cms.content.property.ObjectPathProperty(
'.head.topicpagelink.label',
zeit.campus.interfaces.ITopicpageLink['topicpagelink_label'])
<commit_msg>ZON-2838: Fix xml path to topcpagelink<commit_after> | import zope.interface
import zeit.cms.content.reference
import zeit.campus.interfaces
class TopicpageLink(zeit.cms.related.related.RelatedBase):
zope.interface.implements(zeit.campus.interfaces.ITopicpageLink)
topicpagelink = zeit.cms.content.reference.SingleResource(
'.head.topicpagelink', 'related')
topicpagelink_label = zeit.cms.content.dav.mapProperties(
zeit.campus.interfaces.ITopicpageLink,
zeit.cms.interfaces.DOCUMENT_SCHEMA_NS,
('topicpagelink_label',))
topicpagelink_label = zeit.cms.content.property.ObjectPathProperty(
'.head.topicpagelink.label',
zeit.campus.interfaces.ITopicpageLink['topicpagelink_label'])
| import zope.interface
import zeit.cms.content.reference
import zeit.campus.interfaces
class TopicpageLink(zeit.cms.related.related.RelatedBase):
zope.interface.implements(zeit.campus.interfaces.ITopicpageLink)
topicpagelink = zeit.cms.content.reference.SingleResource(
'.head.topicpagelink.url', 'related')
topicpagelink_label = zeit.cms.content.dav.mapProperties(
zeit.campus.interfaces.ITopicpageLink,
zeit.cms.interfaces.DOCUMENT_SCHEMA_NS,
('topicpagelink_label',))
topicpagelink_label = zeit.cms.content.property.ObjectPathProperty(
'.head.topicpagelink.label',
zeit.campus.interfaces.ITopicpageLink['topicpagelink_label'])
ZON-2838: Fix xml path to topcpagelinkimport zope.interface
import zeit.cms.content.reference
import zeit.campus.interfaces
class TopicpageLink(zeit.cms.related.related.RelatedBase):
zope.interface.implements(zeit.campus.interfaces.ITopicpageLink)
topicpagelink = zeit.cms.content.reference.SingleResource(
'.head.topicpagelink', 'related')
topicpagelink_label = zeit.cms.content.dav.mapProperties(
zeit.campus.interfaces.ITopicpageLink,
zeit.cms.interfaces.DOCUMENT_SCHEMA_NS,
('topicpagelink_label',))
topicpagelink_label = zeit.cms.content.property.ObjectPathProperty(
'.head.topicpagelink.label',
zeit.campus.interfaces.ITopicpageLink['topicpagelink_label'])
| <commit_before>import zope.interface
import zeit.cms.content.reference
import zeit.campus.interfaces
class TopicpageLink(zeit.cms.related.related.RelatedBase):
zope.interface.implements(zeit.campus.interfaces.ITopicpageLink)
topicpagelink = zeit.cms.content.reference.SingleResource(
'.head.topicpagelink.url', 'related')
topicpagelink_label = zeit.cms.content.dav.mapProperties(
zeit.campus.interfaces.ITopicpageLink,
zeit.cms.interfaces.DOCUMENT_SCHEMA_NS,
('topicpagelink_label',))
topicpagelink_label = zeit.cms.content.property.ObjectPathProperty(
'.head.topicpagelink.label',
zeit.campus.interfaces.ITopicpageLink['topicpagelink_label'])
<commit_msg>ZON-2838: Fix xml path to topcpagelink<commit_after>import zope.interface
import zeit.cms.content.reference
import zeit.campus.interfaces
class TopicpageLink(zeit.cms.related.related.RelatedBase):
zope.interface.implements(zeit.campus.interfaces.ITopicpageLink)
topicpagelink = zeit.cms.content.reference.SingleResource(
'.head.topicpagelink', 'related')
topicpagelink_label = zeit.cms.content.dav.mapProperties(
zeit.campus.interfaces.ITopicpageLink,
zeit.cms.interfaces.DOCUMENT_SCHEMA_NS,
('topicpagelink_label',))
topicpagelink_label = zeit.cms.content.property.ObjectPathProperty(
'.head.topicpagelink.label',
zeit.campus.interfaces.ITopicpageLink['topicpagelink_label'])
|
bf36e307b13148d40e978ebb32151a3ea0e32cf9 | stampman/tests/test_api.py | stampman/tests/test_api.py | import unittest
from stampman.services import pool
class TestAPIEndpoint(unittest.TestCase):
pass
| import unittest
import json
import requests
from stampman import main
class TestAPIRoot(unittest.TestCase):
def setUp(self):
self._port = "8000"
self._path = "http://0.0.0.0"
main.app.config['TESTING'] = True
self._app = main.app.test_client()
def testGetJson(self):
response = self._app.get("/")
expected_response = [
{
"services": [
{
"name": "mailgun",
"priority": 2
},
{
"name": "sendgrid",
"priority": 1
}
],
"url": "http://localhost/mail.sshukla.de",
"domain": "mail.sshukla.de"
}
]
response_dict = json.loads(str(response.data, 'utf-8'))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response_dict, expected_response)
| Add unit test for testing flask endpoint | Add unit test for testing flask endpoint
Test GET on the `/` endpoint
| Python | mit | thunderboltsid/stampman | import unittest
from stampman.services import pool
class TestAPIEndpoint(unittest.TestCase):
pass
Add unit test for testing flask endpoint
Test GET on the `/` endpoint | import unittest
import json
import requests
from stampman import main
class TestAPIRoot(unittest.TestCase):
def setUp(self):
self._port = "8000"
self._path = "http://0.0.0.0"
main.app.config['TESTING'] = True
self._app = main.app.test_client()
def testGetJson(self):
response = self._app.get("/")
expected_response = [
{
"services": [
{
"name": "mailgun",
"priority": 2
},
{
"name": "sendgrid",
"priority": 1
}
],
"url": "http://localhost/mail.sshukla.de",
"domain": "mail.sshukla.de"
}
]
response_dict = json.loads(str(response.data, 'utf-8'))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response_dict, expected_response)
| <commit_before>import unittest
from stampman.services import pool
class TestAPIEndpoint(unittest.TestCase):
pass
<commit_msg>Add unit test for testing flask endpoint
Test GET on the `/` endpoint<commit_after> | import unittest
import json
import requests
from stampman import main
class TestAPIRoot(unittest.TestCase):
def setUp(self):
self._port = "8000"
self._path = "http://0.0.0.0"
main.app.config['TESTING'] = True
self._app = main.app.test_client()
def testGetJson(self):
response = self._app.get("/")
expected_response = [
{
"services": [
{
"name": "mailgun",
"priority": 2
},
{
"name": "sendgrid",
"priority": 1
}
],
"url": "http://localhost/mail.sshukla.de",
"domain": "mail.sshukla.de"
}
]
response_dict = json.loads(str(response.data, 'utf-8'))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response_dict, expected_response)
| import unittest
from stampman.services import pool
class TestAPIEndpoint(unittest.TestCase):
pass
Add unit test for testing flask endpoint
Test GET on the `/` endpointimport unittest
import json
import requests
from stampman import main
class TestAPIRoot(unittest.TestCase):
def setUp(self):
self._port = "8000"
self._path = "http://0.0.0.0"
main.app.config['TESTING'] = True
self._app = main.app.test_client()
def testGetJson(self):
response = self._app.get("/")
expected_response = [
{
"services": [
{
"name": "mailgun",
"priority": 2
},
{
"name": "sendgrid",
"priority": 1
}
],
"url": "http://localhost/mail.sshukla.de",
"domain": "mail.sshukla.de"
}
]
response_dict = json.loads(str(response.data, 'utf-8'))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response_dict, expected_response)
| <commit_before>import unittest
from stampman.services import pool
class TestAPIEndpoint(unittest.TestCase):
pass
<commit_msg>Add unit test for testing flask endpoint
Test GET on the `/` endpoint<commit_after>import unittest
import json
import requests
from stampman import main
class TestAPIRoot(unittest.TestCase):
def setUp(self):
self._port = "8000"
self._path = "http://0.0.0.0"
main.app.config['TESTING'] = True
self._app = main.app.test_client()
def testGetJson(self):
response = self._app.get("/")
expected_response = [
{
"services": [
{
"name": "mailgun",
"priority": 2
},
{
"name": "sendgrid",
"priority": 1
}
],
"url": "http://localhost/mail.sshukla.de",
"domain": "mail.sshukla.de"
}
]
response_dict = json.loads(str(response.data, 'utf-8'))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content_type, "application/json")
self.assertEqual(response_dict, expected_response)
|
485c722010882f5273b2cca77305b499cd4674c5 | astropy/tests/tests/test_run_tests.py | astropy/tests/tests/test_run_tests.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# test helper.run_tests function
import warnings
from .. import helper
from ... import _get_test_runner
from .. helper import pytest
# run_tests should raise ValueError when asked to run on a module it can't find
def test_module_not_found():
with helper.pytest.raises(ValueError):
_get_test_runner().run_tests('fake.module')
# run_tests should raise ValueError when passed an invalid pastebin= option
def test_pastebin_keyword():
with helper.pytest.raises(ValueError):
_get_test_runner().run_tests(pastebin='not_an_option')
# tests that tests are only run in Python 3 out of the 2to3'd build (otherwise
# a syntax error would occur)
try:
from .run_after_2to3 import test_run_after_2to3
except SyntaxError:
def test_run_after_2to3():
helper.pytest.fail("Not running the 2to3'd tests!")
def test_deprecation_warning():
with pytest.raises(DeprecationWarning):
warnings.warn('test warning', DeprecationWarning)
| # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# test helper.run_tests function
import warnings
from .. import helper
from ... import _get_test_runner
from .. helper import pytest
# run_tests should raise ValueError when asked to run on a module it can't find
def test_module_not_found():
with helper.pytest.raises(ValueError):
_get_test_runner().run_tests('fake.module')
# run_tests should raise ValueError when passed an invalid pastebin= option
def test_pastebin_keyword():
with helper.pytest.raises(ValueError):
_get_test_runner().run_tests(pastebin='not_an_option')
# tests that tests are only run in Python 3 out of the 2to3'd build (otherwise
# a syntax error would occur)
try:
from .run_after_2to3 import test_run_after_2to3
except SyntaxError:
def test_run_after_2to3():
helper.pytest.fail("Not running the 2to3'd tests!")
# TODO: Temporarily disabled, as this seems to non-deterministically fail
# def test_deprecation_warning():
# with pytest.raises(DeprecationWarning):
# warnings.warn('test warning', DeprecationWarning)
| Disable test that is non-deterministically failing. | Disable test that is non-deterministically failing.
| Python | bsd-3-clause | DougBurke/astropy,dhomeier/astropy,kelle/astropy,aleksandr-bakanov/astropy,AustereCuriosity/astropy,kelle/astropy,saimn/astropy,StuartLittlefair/astropy,lpsinger/astropy,DougBurke/astropy,StuartLittlefair/astropy,AustereCuriosity/astropy,dhomeier/astropy,larrybradley/astropy,stargaser/astropy,dhomeier/astropy,astropy/astropy,lpsinger/astropy,mhvk/astropy,pllim/astropy,astropy/astropy,astropy/astropy,pllim/astropy,larrybradley/astropy,bsipocz/astropy,aleksandr-bakanov/astropy,MSeifert04/astropy,DougBurke/astropy,dhomeier/astropy,bsipocz/astropy,mhvk/astropy,bsipocz/astropy,lpsinger/astropy,pllim/astropy,kelle/astropy,DougBurke/astropy,lpsinger/astropy,funbaker/astropy,larrybradley/astropy,tbabej/astropy,AustereCuriosity/astropy,larrybradley/astropy,astropy/astropy,bsipocz/astropy,stargaser/astropy,MSeifert04/astropy,joergdietrich/astropy,tbabej/astropy,AustereCuriosity/astropy,pllim/astropy,kelle/astropy,stargaser/astropy,StuartLittlefair/astropy,aleksandr-bakanov/astropy,funbaker/astropy,pllim/astropy,mhvk/astropy,funbaker/astropy,StuartLittlefair/astropy,aleksandr-bakanov/astropy,joergdietrich/astropy,tbabej/astropy,larrybradley/astropy,StuartLittlefair/astropy,MSeifert04/astropy,joergdietrich/astropy,tbabej/astropy,funbaker/astropy,astropy/astropy,dhomeier/astropy,saimn/astropy,saimn/astropy,saimn/astropy,joergdietrich/astropy,lpsinger/astropy,kelle/astropy,stargaser/astropy,mhvk/astropy,MSeifert04/astropy,tbabej/astropy,mhvk/astropy,saimn/astropy,joergdietrich/astropy,AustereCuriosity/astropy | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# test helper.run_tests function
import warnings
from .. import helper
from ... import _get_test_runner
from .. helper import pytest
# run_tests should raise ValueError when asked to run on a module it can't find
def test_module_not_found():
with helper.pytest.raises(ValueError):
_get_test_runner().run_tests('fake.module')
# run_tests should raise ValueError when passed an invalid pastebin= option
def test_pastebin_keyword():
with helper.pytest.raises(ValueError):
_get_test_runner().run_tests(pastebin='not_an_option')
# tests that tests are only run in Python 3 out of the 2to3'd build (otherwise
# a syntax error would occur)
try:
from .run_after_2to3 import test_run_after_2to3
except SyntaxError:
def test_run_after_2to3():
helper.pytest.fail("Not running the 2to3'd tests!")
def test_deprecation_warning():
with pytest.raises(DeprecationWarning):
warnings.warn('test warning', DeprecationWarning)
Disable test that is non-deterministically failing. | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# test helper.run_tests function
import warnings
from .. import helper
from ... import _get_test_runner
from .. helper import pytest
# run_tests should raise ValueError when asked to run on a module it can't find
def test_module_not_found():
with helper.pytest.raises(ValueError):
_get_test_runner().run_tests('fake.module')
# run_tests should raise ValueError when passed an invalid pastebin= option
def test_pastebin_keyword():
with helper.pytest.raises(ValueError):
_get_test_runner().run_tests(pastebin='not_an_option')
# tests that tests are only run in Python 3 out of the 2to3'd build (otherwise
# a syntax error would occur)
try:
from .run_after_2to3 import test_run_after_2to3
except SyntaxError:
def test_run_after_2to3():
helper.pytest.fail("Not running the 2to3'd tests!")
# TODO: Temporarily disabled, as this seems to non-deterministically fail
# def test_deprecation_warning():
# with pytest.raises(DeprecationWarning):
# warnings.warn('test warning', DeprecationWarning)
| <commit_before># Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# test helper.run_tests function
import warnings
from .. import helper
from ... import _get_test_runner
from .. helper import pytest
# run_tests should raise ValueError when asked to run on a module it can't find
def test_module_not_found():
with helper.pytest.raises(ValueError):
_get_test_runner().run_tests('fake.module')
# run_tests should raise ValueError when passed an invalid pastebin= option
def test_pastebin_keyword():
with helper.pytest.raises(ValueError):
_get_test_runner().run_tests(pastebin='not_an_option')
# tests that tests are only run in Python 3 out of the 2to3'd build (otherwise
# a syntax error would occur)
try:
from .run_after_2to3 import test_run_after_2to3
except SyntaxError:
def test_run_after_2to3():
helper.pytest.fail("Not running the 2to3'd tests!")
def test_deprecation_warning():
with pytest.raises(DeprecationWarning):
warnings.warn('test warning', DeprecationWarning)
<commit_msg>Disable test that is non-deterministically failing.<commit_after> | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# test helper.run_tests function
import warnings
from .. import helper
from ... import _get_test_runner
from .. helper import pytest
# run_tests should raise ValueError when asked to run on a module it can't find
def test_module_not_found():
with helper.pytest.raises(ValueError):
_get_test_runner().run_tests('fake.module')
# run_tests should raise ValueError when passed an invalid pastebin= option
def test_pastebin_keyword():
with helper.pytest.raises(ValueError):
_get_test_runner().run_tests(pastebin='not_an_option')
# tests that tests are only run in Python 3 out of the 2to3'd build (otherwise
# a syntax error would occur)
try:
from .run_after_2to3 import test_run_after_2to3
except SyntaxError:
def test_run_after_2to3():
helper.pytest.fail("Not running the 2to3'd tests!")
# TODO: Temporarily disabled, as this seems to non-deterministically fail
# def test_deprecation_warning():
# with pytest.raises(DeprecationWarning):
# warnings.warn('test warning', DeprecationWarning)
| # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# test helper.run_tests function
import warnings
from .. import helper
from ... import _get_test_runner
from .. helper import pytest
# run_tests should raise ValueError when asked to run on a module it can't find
def test_module_not_found():
with helper.pytest.raises(ValueError):
_get_test_runner().run_tests('fake.module')
# run_tests should raise ValueError when passed an invalid pastebin= option
def test_pastebin_keyword():
with helper.pytest.raises(ValueError):
_get_test_runner().run_tests(pastebin='not_an_option')
# tests that tests are only run in Python 3 out of the 2to3'd build (otherwise
# a syntax error would occur)
try:
from .run_after_2to3 import test_run_after_2to3
except SyntaxError:
def test_run_after_2to3():
helper.pytest.fail("Not running the 2to3'd tests!")
def test_deprecation_warning():
with pytest.raises(DeprecationWarning):
warnings.warn('test warning', DeprecationWarning)
Disable test that is non-deterministically failing.# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# test helper.run_tests function
import warnings
from .. import helper
from ... import _get_test_runner
from .. helper import pytest
# run_tests should raise ValueError when asked to run on a module it can't find
def test_module_not_found():
with helper.pytest.raises(ValueError):
_get_test_runner().run_tests('fake.module')
# run_tests should raise ValueError when passed an invalid pastebin= option
def test_pastebin_keyword():
with helper.pytest.raises(ValueError):
_get_test_runner().run_tests(pastebin='not_an_option')
# tests that tests are only run in Python 3 out of the 2to3'd build (otherwise
# a syntax error would occur)
try:
from .run_after_2to3 import test_run_after_2to3
except SyntaxError:
def test_run_after_2to3():
helper.pytest.fail("Not running the 2to3'd tests!")
# TODO: Temporarily disabled, as this seems to non-deterministically fail
# def test_deprecation_warning():
# with pytest.raises(DeprecationWarning):
# warnings.warn('test warning', DeprecationWarning)
| <commit_before># Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# test helper.run_tests function
import warnings
from .. import helper
from ... import _get_test_runner
from .. helper import pytest
# run_tests should raise ValueError when asked to run on a module it can't find
def test_module_not_found():
with helper.pytest.raises(ValueError):
_get_test_runner().run_tests('fake.module')
# run_tests should raise ValueError when passed an invalid pastebin= option
def test_pastebin_keyword():
with helper.pytest.raises(ValueError):
_get_test_runner().run_tests(pastebin='not_an_option')
# tests that tests are only run in Python 3 out of the 2to3'd build (otherwise
# a syntax error would occur)
try:
from .run_after_2to3 import test_run_after_2to3
except SyntaxError:
def test_run_after_2to3():
helper.pytest.fail("Not running the 2to3'd tests!")
def test_deprecation_warning():
with pytest.raises(DeprecationWarning):
warnings.warn('test warning', DeprecationWarning)
<commit_msg>Disable test that is non-deterministically failing.<commit_after># Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
# test helper.run_tests function
import warnings
from .. import helper
from ... import _get_test_runner
from .. helper import pytest
# run_tests should raise ValueError when asked to run on a module it can't find
def test_module_not_found():
with helper.pytest.raises(ValueError):
_get_test_runner().run_tests('fake.module')
# run_tests should raise ValueError when passed an invalid pastebin= option
def test_pastebin_keyword():
with helper.pytest.raises(ValueError):
_get_test_runner().run_tests(pastebin='not_an_option')
# tests that tests are only run in Python 3 out of the 2to3'd build (otherwise
# a syntax error would occur)
try:
from .run_after_2to3 import test_run_after_2to3
except SyntaxError:
def test_run_after_2to3():
helper.pytest.fail("Not running the 2to3'd tests!")
# TODO: Temporarily disabled, as this seems to non-deterministically fail
# def test_deprecation_warning():
# with pytest.raises(DeprecationWarning):
# warnings.warn('test warning', DeprecationWarning)
|
20ccdd67d7e398fc4e4b5f72f723da31346e66a7 | graphene/traversal/query.py | graphene/traversal/query.py | class Query:
def __init__(self, ident, name, oper, value):
self.ident = ident
self.name = name
self.oper = oper
self.value = value
def test(self, prop_dict):
if self.ident is not None:
key = "%s.%s" % (self.ident, self.name)
else:
key = self.name
try:
value, tt = prop_dict[key]
except KeyError:
raise Exception("%s is not a valid property name." % key)
if self.oper == '=':
return value == self.value
if self.oper == '!=':
return value != self.value
if self.oper == '>=':
return value >= self.value
if self.oper == '>':
return value > self.value
if self.oper == '<=':
return value <= self.value
if self.oper == '<':
return value < self.value
return False
@staticmethod
def parse_chain(storage_manager, chain, type_schema, alias):
qc = []
for q in chain:
if type(q) == tuple:
# actual query
ident, name, oper, value = q
ident = ident or alias
tt = filter(lambda t: t[1] == name, type_schema)
if len(tt) == 0:
# no such named property
raise Exception("%s is not a valid property name." % name)
ttype = tt[0][2]
qc.append(Query(ident, name, oper, storage_manager.convert_to_value(value, ttype)))
else:
qc.append(q)
return qc
| class Query:
def __init__(self, ident, name, oper, value):
self.ident = ident
self.name = name
self.oper = oper
self.value = value
def test(self, prop_dict):
if self.ident is not None:
key = "%s.%s" % (self.ident, self.name)
else:
key = self.name
try:
value, tt = prop_dict[key]
except KeyError:
raise Exception("%s is not a valid property name." % key)
if self.oper == '=':
return value == self.value
if self.oper == '!=':
return value != self.value
if self.oper == '>=':
return value >= self.value
if self.oper == '>':
return value > self.value
if self.oper == '<=':
return value <= self.value
if self.oper == '<':
return value < self.value
return False
@staticmethod
def parse_chain(storage_manager, chain, type_schema, alias=None):
qc = []
for q in chain:
if type(q) == tuple:
# actual query
ident, name, oper, value = q
ident = ident or alias
tt = filter(lambda t: t[1] == name, type_schema)
if len(tt) == 0:
# no such named property
raise Exception("%s is not a valid property name." % name)
ttype = tt[0][2]
qc.append(Query(ident, name, oper, storage_manager.convert_to_value(value, ttype)))
else:
qc.append(q)
return qc
| Fix issue with Query.parse_chain being broken in InsertRelationCommand | Fix issue with Query.parse_chain being broken in InsertRelationCommand
| Python | apache-2.0 | PHB-CS123/graphene,PHB-CS123/graphene,PHB-CS123/graphene | class Query:
def __init__(self, ident, name, oper, value):
self.ident = ident
self.name = name
self.oper = oper
self.value = value
def test(self, prop_dict):
if self.ident is not None:
key = "%s.%s" % (self.ident, self.name)
else:
key = self.name
try:
value, tt = prop_dict[key]
except KeyError:
raise Exception("%s is not a valid property name." % key)
if self.oper == '=':
return value == self.value
if self.oper == '!=':
return value != self.value
if self.oper == '>=':
return value >= self.value
if self.oper == '>':
return value > self.value
if self.oper == '<=':
return value <= self.value
if self.oper == '<':
return value < self.value
return False
@staticmethod
def parse_chain(storage_manager, chain, type_schema, alias):
qc = []
for q in chain:
if type(q) == tuple:
# actual query
ident, name, oper, value = q
ident = ident or alias
tt = filter(lambda t: t[1] == name, type_schema)
if len(tt) == 0:
# no such named property
raise Exception("%s is not a valid property name." % name)
ttype = tt[0][2]
qc.append(Query(ident, name, oper, storage_manager.convert_to_value(value, ttype)))
else:
qc.append(q)
return qc
Fix issue with Query.parse_chain being broken in InsertRelationCommand | class Query:
def __init__(self, ident, name, oper, value):
self.ident = ident
self.name = name
self.oper = oper
self.value = value
def test(self, prop_dict):
if self.ident is not None:
key = "%s.%s" % (self.ident, self.name)
else:
key = self.name
try:
value, tt = prop_dict[key]
except KeyError:
raise Exception("%s is not a valid property name." % key)
if self.oper == '=':
return value == self.value
if self.oper == '!=':
return value != self.value
if self.oper == '>=':
return value >= self.value
if self.oper == '>':
return value > self.value
if self.oper == '<=':
return value <= self.value
if self.oper == '<':
return value < self.value
return False
@staticmethod
def parse_chain(storage_manager, chain, type_schema, alias=None):
qc = []
for q in chain:
if type(q) == tuple:
# actual query
ident, name, oper, value = q
ident = ident or alias
tt = filter(lambda t: t[1] == name, type_schema)
if len(tt) == 0:
# no such named property
raise Exception("%s is not a valid property name." % name)
ttype = tt[0][2]
qc.append(Query(ident, name, oper, storage_manager.convert_to_value(value, ttype)))
else:
qc.append(q)
return qc
| <commit_before>class Query:
def __init__(self, ident, name, oper, value):
self.ident = ident
self.name = name
self.oper = oper
self.value = value
def test(self, prop_dict):
if self.ident is not None:
key = "%s.%s" % (self.ident, self.name)
else:
key = self.name
try:
value, tt = prop_dict[key]
except KeyError:
raise Exception("%s is not a valid property name." % key)
if self.oper == '=':
return value == self.value
if self.oper == '!=':
return value != self.value
if self.oper == '>=':
return value >= self.value
if self.oper == '>':
return value > self.value
if self.oper == '<=':
return value <= self.value
if self.oper == '<':
return value < self.value
return False
@staticmethod
def parse_chain(storage_manager, chain, type_schema, alias):
qc = []
for q in chain:
if type(q) == tuple:
# actual query
ident, name, oper, value = q
ident = ident or alias
tt = filter(lambda t: t[1] == name, type_schema)
if len(tt) == 0:
# no such named property
raise Exception("%s is not a valid property name." % name)
ttype = tt[0][2]
qc.append(Query(ident, name, oper, storage_manager.convert_to_value(value, ttype)))
else:
qc.append(q)
return qc
<commit_msg>Fix issue with Query.parse_chain being broken in InsertRelationCommand<commit_after> | class Query:
def __init__(self, ident, name, oper, value):
self.ident = ident
self.name = name
self.oper = oper
self.value = value
def test(self, prop_dict):
if self.ident is not None:
key = "%s.%s" % (self.ident, self.name)
else:
key = self.name
try:
value, tt = prop_dict[key]
except KeyError:
raise Exception("%s is not a valid property name." % key)
if self.oper == '=':
return value == self.value
if self.oper == '!=':
return value != self.value
if self.oper == '>=':
return value >= self.value
if self.oper == '>':
return value > self.value
if self.oper == '<=':
return value <= self.value
if self.oper == '<':
return value < self.value
return False
@staticmethod
def parse_chain(storage_manager, chain, type_schema, alias=None):
qc = []
for q in chain:
if type(q) == tuple:
# actual query
ident, name, oper, value = q
ident = ident or alias
tt = filter(lambda t: t[1] == name, type_schema)
if len(tt) == 0:
# no such named property
raise Exception("%s is not a valid property name." % name)
ttype = tt[0][2]
qc.append(Query(ident, name, oper, storage_manager.convert_to_value(value, ttype)))
else:
qc.append(q)
return qc
| class Query:
def __init__(self, ident, name, oper, value):
self.ident = ident
self.name = name
self.oper = oper
self.value = value
def test(self, prop_dict):
if self.ident is not None:
key = "%s.%s" % (self.ident, self.name)
else:
key = self.name
try:
value, tt = prop_dict[key]
except KeyError:
raise Exception("%s is not a valid property name." % key)
if self.oper == '=':
return value == self.value
if self.oper == '!=':
return value != self.value
if self.oper == '>=':
return value >= self.value
if self.oper == '>':
return value > self.value
if self.oper == '<=':
return value <= self.value
if self.oper == '<':
return value < self.value
return False
@staticmethod
def parse_chain(storage_manager, chain, type_schema, alias):
qc = []
for q in chain:
if type(q) == tuple:
# actual query
ident, name, oper, value = q
ident = ident or alias
tt = filter(lambda t: t[1] == name, type_schema)
if len(tt) == 0:
# no such named property
raise Exception("%s is not a valid property name." % name)
ttype = tt[0][2]
qc.append(Query(ident, name, oper, storage_manager.convert_to_value(value, ttype)))
else:
qc.append(q)
return qc
Fix issue with Query.parse_chain being broken in InsertRelationCommandclass Query:
def __init__(self, ident, name, oper, value):
self.ident = ident
self.name = name
self.oper = oper
self.value = value
def test(self, prop_dict):
if self.ident is not None:
key = "%s.%s" % (self.ident, self.name)
else:
key = self.name
try:
value, tt = prop_dict[key]
except KeyError:
raise Exception("%s is not a valid property name." % key)
if self.oper == '=':
return value == self.value
if self.oper == '!=':
return value != self.value
if self.oper == '>=':
return value >= self.value
if self.oper == '>':
return value > self.value
if self.oper == '<=':
return value <= self.value
if self.oper == '<':
return value < self.value
return False
@staticmethod
def parse_chain(storage_manager, chain, type_schema, alias=None):
qc = []
for q in chain:
if type(q) == tuple:
# actual query
ident, name, oper, value = q
ident = ident or alias
tt = filter(lambda t: t[1] == name, type_schema)
if len(tt) == 0:
# no such named property
raise Exception("%s is not a valid property name." % name)
ttype = tt[0][2]
qc.append(Query(ident, name, oper, storage_manager.convert_to_value(value, ttype)))
else:
qc.append(q)
return qc
| <commit_before>class Query:
def __init__(self, ident, name, oper, value):
self.ident = ident
self.name = name
self.oper = oper
self.value = value
def test(self, prop_dict):
if self.ident is not None:
key = "%s.%s" % (self.ident, self.name)
else:
key = self.name
try:
value, tt = prop_dict[key]
except KeyError:
raise Exception("%s is not a valid property name." % key)
if self.oper == '=':
return value == self.value
if self.oper == '!=':
return value != self.value
if self.oper == '>=':
return value >= self.value
if self.oper == '>':
return value > self.value
if self.oper == '<=':
return value <= self.value
if self.oper == '<':
return value < self.value
return False
@staticmethod
def parse_chain(storage_manager, chain, type_schema, alias):
qc = []
for q in chain:
if type(q) == tuple:
# actual query
ident, name, oper, value = q
ident = ident or alias
tt = filter(lambda t: t[1] == name, type_schema)
if len(tt) == 0:
# no such named property
raise Exception("%s is not a valid property name." % name)
ttype = tt[0][2]
qc.append(Query(ident, name, oper, storage_manager.convert_to_value(value, ttype)))
else:
qc.append(q)
return qc
<commit_msg>Fix issue with Query.parse_chain being broken in InsertRelationCommand<commit_after>class Query:
def __init__(self, ident, name, oper, value):
self.ident = ident
self.name = name
self.oper = oper
self.value = value
def test(self, prop_dict):
if self.ident is not None:
key = "%s.%s" % (self.ident, self.name)
else:
key = self.name
try:
value, tt = prop_dict[key]
except KeyError:
raise Exception("%s is not a valid property name." % key)
if self.oper == '=':
return value == self.value
if self.oper == '!=':
return value != self.value
if self.oper == '>=':
return value >= self.value
if self.oper == '>':
return value > self.value
if self.oper == '<=':
return value <= self.value
if self.oper == '<':
return value < self.value
return False
@staticmethod
def parse_chain(storage_manager, chain, type_schema, alias=None):
qc = []
for q in chain:
if type(q) == tuple:
# actual query
ident, name, oper, value = q
ident = ident or alias
tt = filter(lambda t: t[1] == name, type_schema)
if len(tt) == 0:
# no such named property
raise Exception("%s is not a valid property name." % name)
ttype = tt[0][2]
qc.append(Query(ident, name, oper, storage_manager.convert_to_value(value, ttype)))
else:
qc.append(q)
return qc
|
149c1257f4af4c6962c61e74bddbdddfcc741524 | cbagent/collectors/libstats/psstats.py | cbagent/collectors/libstats/psstats.py | from cbagent.collectors.libstats.remotestats import RemoteStats, parallel_task
class PSStats(RemoteStats):
METRICS = (
("rss", 1024), # kB -> B
("vsize", 1024),
)
PS_CMD = "ps -eo pid,rss,vsize,comm | " \
"grep {} | grep -v grep | sort -n -k 2 | tail -n 1"
TOP_CMD = "top -b n2 -d1 -p {0} | grep {0}"
@parallel_task(server_side=True)
def get_server_samples(self, process):
return self.get_samples(process)
@parallel_task(server_side=False)
def get_client_samples(self, process):
return self.get_samples(process)
def get_samples(self, process):
samples = {}
stdout = self.run(self.PS_CMD.format(process))
if stdout:
for i, value in enumerate(stdout.split()[1:1 + len(self.METRICS)]):
metric, multiplier = self.METRICS[i]
title = "{}_{}".format(process, metric)
samples[title] = float(value) * multiplier
pid = stdout.split()[0]
else:
return samples
stdout = self.run(self.TOP_CMD.format(pid))
if stdout:
title = "{}_cpu".format(process)
samples[title] = float(stdout.split()[8])
return samples
| from cbagent.collectors.libstats.remotestats import RemoteStats, parallel_task
class PSStats(RemoteStats):
    """Collect per-process memory (via ``ps``) and CPU (via ``top``) samples."""
    # (ps column name, multiplier converting the reported unit to bytes)
    METRICS = (
        ("rss", 1024),  # kB -> B
        ("vsize", 1024),
    )
    # Select the instance of the named process with the largest RSS
    # (sort by column 2, keep the last line).
    PS_CMD = "ps -eo pid,rss,vsize,comm | " \
        "grep {} | grep -v grep | sort -n -k 2 | tail -n 1"
    # The leading ^ anchors the PID at the start of the line so rows where
    # the PID digits merely appear inside a memory column are not matched.
    TOP_CMD = "top -b n2 -d1 -p {0} | grep ^{0}"
    @parallel_task(server_side=True)
    def get_server_samples(self, process):
        # Same collection logic as on clients; only the execution side differs.
        return self.get_samples(process)
    @parallel_task(server_side=False)
    def get_client_samples(self, process):
        return self.get_samples(process)
    def get_samples(self, process):
        """Return a dict of "<process>_rss/_vsize/_cpu" samples.

        Returns an empty dict when ``ps`` reports no matching process.
        """
        samples = {}
        stdout = self.run(self.PS_CMD.format(process))
        if stdout:
            # Fields after the leading PID follow METRICS order: rss, vsize.
            for i, value in enumerate(stdout.split()[1:1 + len(self.METRICS)]):
                metric, multiplier = self.METRICS[i]
                title = "{}_{}".format(process, metric)
                samples[title] = float(value) * multiplier
            pid = stdout.split()[0]
        else:
            # Process not running: nothing to sample.
            return samples
        stdout = self.run(self.TOP_CMD.format(pid))
        if stdout:
            # split()[8] is expected to be top's %CPU column -- confirm
            # against the target platform's top output format.
            title = "{}_cpu".format(process)
            samples[title] = float(stdout.split()[8])
        return samples
| Use more precise grep expression | Use more precise grep expression
Otherwise we match wrong lines when memory stats contain PID.
Change-Id: I924c1b151ddaad8209445a514bf02a7af5d2e0e0
Reviewed-on: http://review.couchbase.org/79848
Reviewed-by: Pavel Paulau <dd88eded64e90046a680e3a6c0828ceb8fe8a0e7@gmail.com>
Tested-by: Pavel Paulau <dd88eded64e90046a680e3a6c0828ceb8fe8a0e7@gmail.com>
| Python | apache-2.0 | couchbase/perfrunner,couchbase/perfrunner,pavel-paulau/perfrunner,couchbase/perfrunner,couchbase/perfrunner,couchbase/perfrunner,pavel-paulau/perfrunner,pavel-paulau/perfrunner,couchbase/perfrunner,pavel-paulau/perfrunner,pavel-paulau/perfrunner | from cbagent.collectors.libstats.remotestats import RemoteStats, parallel_task
class PSStats(RemoteStats):
METRICS = (
("rss", 1024), # kB -> B
("vsize", 1024),
)
PS_CMD = "ps -eo pid,rss,vsize,comm | " \
"grep {} | grep -v grep | sort -n -k 2 | tail -n 1"
TOP_CMD = "top -b n2 -d1 -p {0} | grep {0}"
@parallel_task(server_side=True)
def get_server_samples(self, process):
return self.get_samples(process)
@parallel_task(server_side=False)
def get_client_samples(self, process):
return self.get_samples(process)
def get_samples(self, process):
samples = {}
stdout = self.run(self.PS_CMD.format(process))
if stdout:
for i, value in enumerate(stdout.split()[1:1 + len(self.METRICS)]):
metric, multiplier = self.METRICS[i]
title = "{}_{}".format(process, metric)
samples[title] = float(value) * multiplier
pid = stdout.split()[0]
else:
return samples
stdout = self.run(self.TOP_CMD.format(pid))
if stdout:
title = "{}_cpu".format(process)
samples[title] = float(stdout.split()[8])
return samples
Use more precise grep expression
Otherwise we match wrong lines when memory stats contain PID.
Change-Id: I924c1b151ddaad8209445a514bf02a7af5d2e0e0
Reviewed-on: http://review.couchbase.org/79848
Reviewed-by: Pavel Paulau <dd88eded64e90046a680e3a6c0828ceb8fe8a0e7@gmail.com>
Tested-by: Pavel Paulau <dd88eded64e90046a680e3a6c0828ceb8fe8a0e7@gmail.com> | from cbagent.collectors.libstats.remotestats import RemoteStats, parallel_task
class PSStats(RemoteStats):
METRICS = (
("rss", 1024), # kB -> B
("vsize", 1024),
)
PS_CMD = "ps -eo pid,rss,vsize,comm | " \
"grep {} | grep -v grep | sort -n -k 2 | tail -n 1"
TOP_CMD = "top -b n2 -d1 -p {0} | grep ^{0}"
@parallel_task(server_side=True)
def get_server_samples(self, process):
return self.get_samples(process)
@parallel_task(server_side=False)
def get_client_samples(self, process):
return self.get_samples(process)
def get_samples(self, process):
samples = {}
stdout = self.run(self.PS_CMD.format(process))
if stdout:
for i, value in enumerate(stdout.split()[1:1 + len(self.METRICS)]):
metric, multiplier = self.METRICS[i]
title = "{}_{}".format(process, metric)
samples[title] = float(value) * multiplier
pid = stdout.split()[0]
else:
return samples
stdout = self.run(self.TOP_CMD.format(pid))
if stdout:
title = "{}_cpu".format(process)
samples[title] = float(stdout.split()[8])
return samples
| <commit_before>from cbagent.collectors.libstats.remotestats import RemoteStats, parallel_task
class PSStats(RemoteStats):
METRICS = (
("rss", 1024), # kB -> B
("vsize", 1024),
)
PS_CMD = "ps -eo pid,rss,vsize,comm | " \
"grep {} | grep -v grep | sort -n -k 2 | tail -n 1"
TOP_CMD = "top -b n2 -d1 -p {0} | grep {0}"
@parallel_task(server_side=True)
def get_server_samples(self, process):
return self.get_samples(process)
@parallel_task(server_side=False)
def get_client_samples(self, process):
return self.get_samples(process)
def get_samples(self, process):
samples = {}
stdout = self.run(self.PS_CMD.format(process))
if stdout:
for i, value in enumerate(stdout.split()[1:1 + len(self.METRICS)]):
metric, multiplier = self.METRICS[i]
title = "{}_{}".format(process, metric)
samples[title] = float(value) * multiplier
pid = stdout.split()[0]
else:
return samples
stdout = self.run(self.TOP_CMD.format(pid))
if stdout:
title = "{}_cpu".format(process)
samples[title] = float(stdout.split()[8])
return samples
<commit_msg>Use more precise grep expression
Otherwise we match wrong lines when memory stats contain PID.
Change-Id: I924c1b151ddaad8209445a514bf02a7af5d2e0e0
Reviewed-on: http://review.couchbase.org/79848
Reviewed-by: Pavel Paulau <dd88eded64e90046a680e3a6c0828ceb8fe8a0e7@gmail.com>
Tested-by: Pavel Paulau <dd88eded64e90046a680e3a6c0828ceb8fe8a0e7@gmail.com><commit_after> | from cbagent.collectors.libstats.remotestats import RemoteStats, parallel_task
class PSStats(RemoteStats):
METRICS = (
("rss", 1024), # kB -> B
("vsize", 1024),
)
PS_CMD = "ps -eo pid,rss,vsize,comm | " \
"grep {} | grep -v grep | sort -n -k 2 | tail -n 1"
TOP_CMD = "top -b n2 -d1 -p {0} | grep ^{0}"
@parallel_task(server_side=True)
def get_server_samples(self, process):
return self.get_samples(process)
@parallel_task(server_side=False)
def get_client_samples(self, process):
return self.get_samples(process)
def get_samples(self, process):
samples = {}
stdout = self.run(self.PS_CMD.format(process))
if stdout:
for i, value in enumerate(stdout.split()[1:1 + len(self.METRICS)]):
metric, multiplier = self.METRICS[i]
title = "{}_{}".format(process, metric)
samples[title] = float(value) * multiplier
pid = stdout.split()[0]
else:
return samples
stdout = self.run(self.TOP_CMD.format(pid))
if stdout:
title = "{}_cpu".format(process)
samples[title] = float(stdout.split()[8])
return samples
| from cbagent.collectors.libstats.remotestats import RemoteStats, parallel_task
class PSStats(RemoteStats):
METRICS = (
("rss", 1024), # kB -> B
("vsize", 1024),
)
PS_CMD = "ps -eo pid,rss,vsize,comm | " \
"grep {} | grep -v grep | sort -n -k 2 | tail -n 1"
TOP_CMD = "top -b n2 -d1 -p {0} | grep {0}"
@parallel_task(server_side=True)
def get_server_samples(self, process):
return self.get_samples(process)
@parallel_task(server_side=False)
def get_client_samples(self, process):
return self.get_samples(process)
def get_samples(self, process):
samples = {}
stdout = self.run(self.PS_CMD.format(process))
if stdout:
for i, value in enumerate(stdout.split()[1:1 + len(self.METRICS)]):
metric, multiplier = self.METRICS[i]
title = "{}_{}".format(process, metric)
samples[title] = float(value) * multiplier
pid = stdout.split()[0]
else:
return samples
stdout = self.run(self.TOP_CMD.format(pid))
if stdout:
title = "{}_cpu".format(process)
samples[title] = float(stdout.split()[8])
return samples
Use more precise grep expression
Otherwise we match wrong lines when memory stats contain PID.
Change-Id: I924c1b151ddaad8209445a514bf02a7af5d2e0e0
Reviewed-on: http://review.couchbase.org/79848
Reviewed-by: Pavel Paulau <dd88eded64e90046a680e3a6c0828ceb8fe8a0e7@gmail.com>
Tested-by: Pavel Paulau <dd88eded64e90046a680e3a6c0828ceb8fe8a0e7@gmail.com>from cbagent.collectors.libstats.remotestats import RemoteStats, parallel_task
class PSStats(RemoteStats):
METRICS = (
("rss", 1024), # kB -> B
("vsize", 1024),
)
PS_CMD = "ps -eo pid,rss,vsize,comm | " \
"grep {} | grep -v grep | sort -n -k 2 | tail -n 1"
TOP_CMD = "top -b n2 -d1 -p {0} | grep ^{0}"
@parallel_task(server_side=True)
def get_server_samples(self, process):
return self.get_samples(process)
@parallel_task(server_side=False)
def get_client_samples(self, process):
return self.get_samples(process)
def get_samples(self, process):
samples = {}
stdout = self.run(self.PS_CMD.format(process))
if stdout:
for i, value in enumerate(stdout.split()[1:1 + len(self.METRICS)]):
metric, multiplier = self.METRICS[i]
title = "{}_{}".format(process, metric)
samples[title] = float(value) * multiplier
pid = stdout.split()[0]
else:
return samples
stdout = self.run(self.TOP_CMD.format(pid))
if stdout:
title = "{}_cpu".format(process)
samples[title] = float(stdout.split()[8])
return samples
| <commit_before>from cbagent.collectors.libstats.remotestats import RemoteStats, parallel_task
class PSStats(RemoteStats):
METRICS = (
("rss", 1024), # kB -> B
("vsize", 1024),
)
PS_CMD = "ps -eo pid,rss,vsize,comm | " \
"grep {} | grep -v grep | sort -n -k 2 | tail -n 1"
TOP_CMD = "top -b n2 -d1 -p {0} | grep {0}"
@parallel_task(server_side=True)
def get_server_samples(self, process):
return self.get_samples(process)
@parallel_task(server_side=False)
def get_client_samples(self, process):
return self.get_samples(process)
def get_samples(self, process):
samples = {}
stdout = self.run(self.PS_CMD.format(process))
if stdout:
for i, value in enumerate(stdout.split()[1:1 + len(self.METRICS)]):
metric, multiplier = self.METRICS[i]
title = "{}_{}".format(process, metric)
samples[title] = float(value) * multiplier
pid = stdout.split()[0]
else:
return samples
stdout = self.run(self.TOP_CMD.format(pid))
if stdout:
title = "{}_cpu".format(process)
samples[title] = float(stdout.split()[8])
return samples
<commit_msg>Use more precise grep expression
Otherwise we match wrong lines when memory stats contain PID.
Change-Id: I924c1b151ddaad8209445a514bf02a7af5d2e0e0
Reviewed-on: http://review.couchbase.org/79848
Reviewed-by: Pavel Paulau <dd88eded64e90046a680e3a6c0828ceb8fe8a0e7@gmail.com>
Tested-by: Pavel Paulau <dd88eded64e90046a680e3a6c0828ceb8fe8a0e7@gmail.com><commit_after>from cbagent.collectors.libstats.remotestats import RemoteStats, parallel_task
class PSStats(RemoteStats):
METRICS = (
("rss", 1024), # kB -> B
("vsize", 1024),
)
PS_CMD = "ps -eo pid,rss,vsize,comm | " \
"grep {} | grep -v grep | sort -n -k 2 | tail -n 1"
TOP_CMD = "top -b n2 -d1 -p {0} | grep ^{0}"
@parallel_task(server_side=True)
def get_server_samples(self, process):
return self.get_samples(process)
@parallel_task(server_side=False)
def get_client_samples(self, process):
return self.get_samples(process)
def get_samples(self, process):
samples = {}
stdout = self.run(self.PS_CMD.format(process))
if stdout:
for i, value in enumerate(stdout.split()[1:1 + len(self.METRICS)]):
metric, multiplier = self.METRICS[i]
title = "{}_{}".format(process, metric)
samples[title] = float(value) * multiplier
pid = stdout.split()[0]
else:
return samples
stdout = self.run(self.TOP_CMD.format(pid))
if stdout:
title = "{}_cpu".format(process)
samples[title] = float(stdout.split()[8])
return samples
|
18ea019bd77d605c367265080aa40382399b324b | test/integration/ggrc/converters/test_import_update.py | test/integration/ggrc/converters/test_import_update.py | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from integration.ggrc.converters import TestCase
from ggrc import models
class TestImportUpdates(TestCase):
  """ Test importing of already existing objects """

  def setUp(self):
    TestCase.setUp(self)
    self.client.get("/login")

  def test_policy_basic_update(self):
    """ Test simple policy title update """
    # Message buckets that must all come back empty for a clean import.
    message_keys = ("block_errors", "block_warnings",
                    "row_errors", "row_warnings")

    # First import creates the policy.
    imported = self.import_file("policy_basic_import.csv")
    for import_block in imported:
      for key in message_keys:
        self.assertEqual(set(), set(import_block[key]))
    policy = models.Policy.query.filter_by(slug="p1").first()
    self.assertEqual(policy.title, "some weird policy")

    # Second import updates the same policy (matched by slug).
    updated = self.import_file("policy_basic_import_update.csv")
    for import_block in updated:
      for key in message_keys:
        self.assertEqual(set(), set(import_block[key]))
    policy = models.Policy.query.filter_by(slug="p1").first()
    self.assertEqual(policy.title, "Edited policy")
| # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from integration.ggrc.converters import TestCase
from ggrc import models
class TestImportUpdates(TestCase):
  """ Test importing of already existing objects """

  def setUp(self):
    TestCase.setUp(self)
    self.client.get("/login")

  def test_policy_basic_update(self):
    """ Test simple policy title update """
    # First file creates the policy; second updates it (matched by slug).
    steps = (
        ("policy_basic_import.csv", "some weird policy"),
        ("policy_basic_import_update.csv", "Edited policy"),
    )
    for csv_name, expected_title in steps:
      response = self.import_file(csv_name)
      self._check_response(response, {})
      policy = models.Policy.query.filter_by(slug="p1").first()
      self.assertEqual(policy.title, expected_title)
| Use check response for import update tests | Use check response for import update tests
| Python | apache-2.0 | selahssea/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,selahssea/ggrc-core,kr41/ggrc-core,andrei-karalionak/ggrc-core,j0gurt/ggrc-core,VinnieJohns/ggrc-core,kr41/ggrc-core,AleksNeStu/ggrc-core,josthkko/ggrc-core,josthkko/ggrc-core,AleksNeStu/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,j0gurt/ggrc-core,andrei-karalionak/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,edofic/ggrc-core,VinnieJohns/ggrc-core,edofic/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core,andrei-karalionak/ggrc-core,edofic/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,kr41/ggrc-core,kr41/ggrc-core,andrei-karalionak/ggrc-core | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from integration.ggrc.converters import TestCase
from ggrc import models
class TestImportUpdates(TestCase):
""" Test importing of already existing objects """
def setUp(self):
TestCase.setUp(self)
self.client.get("/login")
def test_policy_basic_update(self):
""" Test simple policy title update """
messages = ("block_errors", "block_warnings", "row_errors", "row_warnings")
filename = "policy_basic_import.csv"
response = self.import_file(filename)
for block in response:
for message in messages:
self.assertEqual(set(), set(block[message]))
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "some weird policy")
filename = "policy_basic_import_update.csv"
response = self.import_file(filename)
for block in response:
for message in messages:
self.assertEqual(set(), set(block[message]))
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "Edited policy")
Use check response for import update tests | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from integration.ggrc.converters import TestCase
from ggrc import models
class TestImportUpdates(TestCase):
""" Test importing of already existing objects """
def setUp(self):
TestCase.setUp(self)
self.client.get("/login")
def test_policy_basic_update(self):
""" Test simple policy title update """
filename = "policy_basic_import.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "some weird policy")
filename = "policy_basic_import_update.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "Edited policy")
| <commit_before># Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from integration.ggrc.converters import TestCase
from ggrc import models
class TestImportUpdates(TestCase):
""" Test importing of already existing objects """
def setUp(self):
TestCase.setUp(self)
self.client.get("/login")
def test_policy_basic_update(self):
""" Test simple policy title update """
messages = ("block_errors", "block_warnings", "row_errors", "row_warnings")
filename = "policy_basic_import.csv"
response = self.import_file(filename)
for block in response:
for message in messages:
self.assertEqual(set(), set(block[message]))
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "some weird policy")
filename = "policy_basic_import_update.csv"
response = self.import_file(filename)
for block in response:
for message in messages:
self.assertEqual(set(), set(block[message]))
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "Edited policy")
<commit_msg>Use check response for import update tests<commit_after> | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from integration.ggrc.converters import TestCase
from ggrc import models
class TestImportUpdates(TestCase):
""" Test importing of already existing objects """
def setUp(self):
TestCase.setUp(self)
self.client.get("/login")
def test_policy_basic_update(self):
""" Test simple policy title update """
filename = "policy_basic_import.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "some weird policy")
filename = "policy_basic_import_update.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "Edited policy")
| # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from integration.ggrc.converters import TestCase
from ggrc import models
class TestImportUpdates(TestCase):
""" Test importing of already existing objects """
def setUp(self):
TestCase.setUp(self)
self.client.get("/login")
def test_policy_basic_update(self):
""" Test simple policy title update """
messages = ("block_errors", "block_warnings", "row_errors", "row_warnings")
filename = "policy_basic_import.csv"
response = self.import_file(filename)
for block in response:
for message in messages:
self.assertEqual(set(), set(block[message]))
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "some weird policy")
filename = "policy_basic_import_update.csv"
response = self.import_file(filename)
for block in response:
for message in messages:
self.assertEqual(set(), set(block[message]))
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "Edited policy")
Use check response for import update tests# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from integration.ggrc.converters import TestCase
from ggrc import models
class TestImportUpdates(TestCase):
""" Test importing of already existing objects """
def setUp(self):
TestCase.setUp(self)
self.client.get("/login")
def test_policy_basic_update(self):
""" Test simple policy title update """
filename = "policy_basic_import.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "some weird policy")
filename = "policy_basic_import_update.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "Edited policy")
| <commit_before># Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from integration.ggrc.converters import TestCase
from ggrc import models
class TestImportUpdates(TestCase):
""" Test importing of already existing objects """
def setUp(self):
TestCase.setUp(self)
self.client.get("/login")
def test_policy_basic_update(self):
""" Test simple policy title update """
messages = ("block_errors", "block_warnings", "row_errors", "row_warnings")
filename = "policy_basic_import.csv"
response = self.import_file(filename)
for block in response:
for message in messages:
self.assertEqual(set(), set(block[message]))
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "some weird policy")
filename = "policy_basic_import_update.csv"
response = self.import_file(filename)
for block in response:
for message in messages:
self.assertEqual(set(), set(block[message]))
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "Edited policy")
<commit_msg>Use check response for import update tests<commit_after># Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from integration.ggrc.converters import TestCase
from ggrc import models
class TestImportUpdates(TestCase):
""" Test importing of already existing objects """
def setUp(self):
TestCase.setUp(self)
self.client.get("/login")
def test_policy_basic_update(self):
""" Test simple policy title update """
filename = "policy_basic_import.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "some weird policy")
filename = "policy_basic_import_update.csv"
response = self.import_file(filename)
self._check_response(response, {})
policy = models.Policy.query.filter_by(slug="p1").first()
self.assertEqual(policy.title, "Edited policy")
|
f9aae4320522af94dde78bac0c30e909ef4ef4e2 | blockbuster/bb_dbconnector_factory.py | blockbuster/bb_dbconnector_factory.py | import logging
import bb_dbconnector_pg
log = logging.getLogger('bb_log.' + __name__)
class DBConnectorInterfaceFactory:
    """Factory that hands out the concrete database connector."""

    def __init__(self):
        # The factory holds no state of its own.
        pass

    @staticmethod
    def create():
        # Postgres is the only backend currently wired in.
        return bb_dbconnector_pg.PostgresConnector()
class DBConnectorInterfaceFactory:
def __init__(self):
pass
@staticmethod
def create():
return bb_dbconnector_pg.PostgresConnector() | Remove logger as not used in module | Remove logger as not used in module
| Python | mit | mattstibbs/blockbuster-server,mattstibbs/blockbuster-server | import logging
import bb_dbconnector_pg
log = logging.getLogger('bb_log.' + __name__)
class DBConnectorInterfaceFactory:
def __init__(self):
pass
@staticmethod
def create():
return bb_dbconnector_pg.PostgresConnector()Remove logger as not used in module | import bb_dbconnector_pg
class DBConnectorInterfaceFactory:
def __init__(self):
pass
@staticmethod
def create():
return bb_dbconnector_pg.PostgresConnector() | <commit_before>import logging
import bb_dbconnector_pg
log = logging.getLogger('bb_log.' + __name__)
class DBConnectorInterfaceFactory:
def __init__(self):
pass
@staticmethod
def create():
return bb_dbconnector_pg.PostgresConnector()<commit_msg>Remove logger as not used in module<commit_after> | import bb_dbconnector_pg
class DBConnectorInterfaceFactory:
def __init__(self):
pass
@staticmethod
def create():
return bb_dbconnector_pg.PostgresConnector() | import logging
import bb_dbconnector_pg
log = logging.getLogger('bb_log.' + __name__)
class DBConnectorInterfaceFactory:
def __init__(self):
pass
@staticmethod
def create():
return bb_dbconnector_pg.PostgresConnector()Remove logger as not used in moduleimport bb_dbconnector_pg
class DBConnectorInterfaceFactory:
def __init__(self):
pass
@staticmethod
def create():
return bb_dbconnector_pg.PostgresConnector() | <commit_before>import logging
import bb_dbconnector_pg
log = logging.getLogger('bb_log.' + __name__)
class DBConnectorInterfaceFactory:
def __init__(self):
pass
@staticmethod
def create():
return bb_dbconnector_pg.PostgresConnector()<commit_msg>Remove logger as not used in module<commit_after>import bb_dbconnector_pg
class DBConnectorInterfaceFactory:
def __init__(self):
pass
@staticmethod
def create():
return bb_dbconnector_pg.PostgresConnector() |
7972c0fbaf8b46810dd36e0d824c341ea4234b47 | swampdragon_live/models.py | swampdragon_live/models.py | # -*- coding: utf-8 -*-
from django.db.models.signals import post_save, pre_delete
from django.contrib.contenttypes.models import ContentType
from django.dispatch import receiver
from .pushers import push_new_content_for_instance
from .pushers import push_new_content_for_queryset
@receiver(post_save)
def post_save_handler(sender, instance, **kwargs):
    # Guard: do nothing while the ContentType table is unavailable
    # (presumably during initial migrations -- TODO confirm).
    if ContentType.objects.exists():
        instance_type = ContentType.objects.get_for_model(instance.__class__)
        # Every save pushes to both queryset and instance listeners,
        # regardless of whether the row was created or merely updated.
        push_new_content_for_queryset(queryset_type_pk=instance_type.pk,
                                      queryset_pk=instance.pk)
        push_new_content_for_instance(instance_type_pk=instance_type.pk,
                                      instance_pk=instance.pk)
@receiver(pre_delete)
def pre_delete_handler(sender, instance, **kwargs):
if ContentType.objects.exists():
instance_type = ContentType.objects.get_for_model(instance.__class__)
push_new_content_for_queryset(queryset_type_pk=instance_type.pk,
queryset_pk=instance.pk)
| # -*- coding: utf-8 -*-
from django.db.models.signals import post_save, pre_delete
from django.contrib.contenttypes.models import ContentType
from django.dispatch import receiver
from .pushers import push_new_content_for_instance
from .pushers import push_new_content_for_queryset
@receiver(post_save)
def post_save_handler(sender, instance, created, **kwargs):
    # Guard: do nothing while the ContentType table is unavailable
    # (presumably during initial migrations -- TODO confirm).
    if ContentType.objects.exists():
        instance_type = ContentType.objects.get_for_model(instance.__class__)
        if created:
            # A newly created row only affects queryset listeners.
            push_new_content_for_queryset(queryset_type_pk=instance_type.pk,
                                          queryset_pk=instance.pk)
        else:
            # An update to an existing row only affects instance listeners.
            push_new_content_for_instance(instance_type_pk=instance_type.pk,
                                          instance_pk=instance.pk)
@receiver(pre_delete)
def pre_delete_handler(sender, instance, **kwargs):
    # Runs before deletion, while instance.pk is still set.
    if ContentType.objects.exists():
        instance_type = ContentType.objects.get_for_model(instance.__class__)
        # A deletion changes querysets that contained the row; no push is
        # made to instance listeners here (the instance is going away).
        push_new_content_for_queryset(queryset_type_pk=instance_type.pk,
                                      queryset_pk=instance.pk)
| Optimize number of updates for queryset and instance listeners | Optimize number of updates for queryset and instance listeners
Only push additions to queryset listeners, not instance changes.
Only push changes to instance listeners, not queryset additions.
| Python | mit | mback2k/swampdragon-live,mback2k/swampdragon-live | # -*- coding: utf-8 -*-
from django.db.models.signals import post_save, pre_delete
from django.contrib.contenttypes.models import ContentType
from django.dispatch import receiver
from .pushers import push_new_content_for_instance
from .pushers import push_new_content_for_queryset
@receiver(post_save)
def post_save_handler(sender, instance, **kwargs):
if ContentType.objects.exists():
instance_type = ContentType.objects.get_for_model(instance.__class__)
push_new_content_for_queryset(queryset_type_pk=instance_type.pk,
queryset_pk=instance.pk)
push_new_content_for_instance(instance_type_pk=instance_type.pk,
instance_pk=instance.pk)
@receiver(pre_delete)
def pre_delete_handler(sender, instance, **kwargs):
if ContentType.objects.exists():
instance_type = ContentType.objects.get_for_model(instance.__class__)
push_new_content_for_queryset(queryset_type_pk=instance_type.pk,
queryset_pk=instance.pk)
Optimize number of updates for queryset and instance listeners
Only push additions to queryset listeners, not instance changes.
Only push changes to instance listeners, not queryset additions. | # -*- coding: utf-8 -*-
from django.db.models.signals import post_save, pre_delete
from django.contrib.contenttypes.models import ContentType
from django.dispatch import receiver
from .pushers import push_new_content_for_instance
from .pushers import push_new_content_for_queryset
@receiver(post_save)
def post_save_handler(sender, instance, created, **kwargs):
if ContentType.objects.exists():
instance_type = ContentType.objects.get_for_model(instance.__class__)
if created:
push_new_content_for_queryset(queryset_type_pk=instance_type.pk,
queryset_pk=instance.pk)
else:
push_new_content_for_instance(instance_type_pk=instance_type.pk,
instance_pk=instance.pk)
@receiver(pre_delete)
def pre_delete_handler(sender, instance, **kwargs):
if ContentType.objects.exists():
instance_type = ContentType.objects.get_for_model(instance.__class__)
push_new_content_for_queryset(queryset_type_pk=instance_type.pk,
queryset_pk=instance.pk)
| <commit_before># -*- coding: utf-8 -*-
from django.db.models.signals import post_save, pre_delete
from django.contrib.contenttypes.models import ContentType
from django.dispatch import receiver
from .pushers import push_new_content_for_instance
from .pushers import push_new_content_for_queryset
@receiver(post_save)
def post_save_handler(sender, instance, **kwargs):
if ContentType.objects.exists():
instance_type = ContentType.objects.get_for_model(instance.__class__)
push_new_content_for_queryset(queryset_type_pk=instance_type.pk,
queryset_pk=instance.pk)
push_new_content_for_instance(instance_type_pk=instance_type.pk,
instance_pk=instance.pk)
@receiver(pre_delete)
def pre_delete_handler(sender, instance, **kwargs):
if ContentType.objects.exists():
instance_type = ContentType.objects.get_for_model(instance.__class__)
push_new_content_for_queryset(queryset_type_pk=instance_type.pk,
queryset_pk=instance.pk)
<commit_msg>Optimize number of updates for queryset and instance listeners
Only push additions to queryset listeners, not instance changes.
Only push changes to instance listeners, not queryset additions.<commit_after> | # -*- coding: utf-8 -*-
from django.db.models.signals import post_save, pre_delete
from django.contrib.contenttypes.models import ContentType
from django.dispatch import receiver
from .pushers import push_new_content_for_instance
from .pushers import push_new_content_for_queryset
@receiver(post_save)
def post_save_handler(sender, instance, created, **kwargs):
if ContentType.objects.exists():
instance_type = ContentType.objects.get_for_model(instance.__class__)
if created:
push_new_content_for_queryset(queryset_type_pk=instance_type.pk,
queryset_pk=instance.pk)
else:
push_new_content_for_instance(instance_type_pk=instance_type.pk,
instance_pk=instance.pk)
@receiver(pre_delete)
def pre_delete_handler(sender, instance, **kwargs):
if ContentType.objects.exists():
instance_type = ContentType.objects.get_for_model(instance.__class__)
push_new_content_for_queryset(queryset_type_pk=instance_type.pk,
queryset_pk=instance.pk)
| # -*- coding: utf-8 -*-
from django.db.models.signals import post_save, pre_delete
from django.contrib.contenttypes.models import ContentType
from django.dispatch import receiver
from .pushers import push_new_content_for_instance
from .pushers import push_new_content_for_queryset
@receiver(post_save)
def post_save_handler(sender, instance, **kwargs):
if ContentType.objects.exists():
instance_type = ContentType.objects.get_for_model(instance.__class__)
push_new_content_for_queryset(queryset_type_pk=instance_type.pk,
queryset_pk=instance.pk)
push_new_content_for_instance(instance_type_pk=instance_type.pk,
instance_pk=instance.pk)
@receiver(pre_delete)
def pre_delete_handler(sender, instance, **kwargs):
if ContentType.objects.exists():
instance_type = ContentType.objects.get_for_model(instance.__class__)
push_new_content_for_queryset(queryset_type_pk=instance_type.pk,
queryset_pk=instance.pk)
Optimize number of updates for queryset and instance listeners
Only push additions to queryset listeners, not instance changes.
Only push changes to instance listeners, not queryset additions.# -*- coding: utf-8 -*-
from django.db.models.signals import post_save, pre_delete
from django.contrib.contenttypes.models import ContentType
from django.dispatch import receiver
from .pushers import push_new_content_for_instance
from .pushers import push_new_content_for_queryset
@receiver(post_save)
def post_save_handler(sender, instance, created, **kwargs):
if ContentType.objects.exists():
instance_type = ContentType.objects.get_for_model(instance.__class__)
if created:
push_new_content_for_queryset(queryset_type_pk=instance_type.pk,
queryset_pk=instance.pk)
else:
push_new_content_for_instance(instance_type_pk=instance_type.pk,
instance_pk=instance.pk)
@receiver(pre_delete)
def pre_delete_handler(sender, instance, **kwargs):
if ContentType.objects.exists():
instance_type = ContentType.objects.get_for_model(instance.__class__)
push_new_content_for_queryset(queryset_type_pk=instance_type.pk,
queryset_pk=instance.pk)
| <commit_before># -*- coding: utf-8 -*-
from django.db.models.signals import post_save, pre_delete
from django.contrib.contenttypes.models import ContentType
from django.dispatch import receiver
from .pushers import push_new_content_for_instance
from .pushers import push_new_content_for_queryset
@receiver(post_save)
def post_save_handler(sender, instance, **kwargs):
if ContentType.objects.exists():
instance_type = ContentType.objects.get_for_model(instance.__class__)
push_new_content_for_queryset(queryset_type_pk=instance_type.pk,
queryset_pk=instance.pk)
push_new_content_for_instance(instance_type_pk=instance_type.pk,
instance_pk=instance.pk)
@receiver(pre_delete)
def pre_delete_handler(sender, instance, **kwargs):
if ContentType.objects.exists():
instance_type = ContentType.objects.get_for_model(instance.__class__)
push_new_content_for_queryset(queryset_type_pk=instance_type.pk,
queryset_pk=instance.pk)
<commit_msg>Optimize number of updates for queryset and instance listeners
Only push additions to queryset listeners, not instance changes.
Only push changes to instance listeners, not queryset additions.<commit_after># -*- coding: utf-8 -*-
from django.db.models.signals import post_save, pre_delete
from django.contrib.contenttypes.models import ContentType
from django.dispatch import receiver
from .pushers import push_new_content_for_instance
from .pushers import push_new_content_for_queryset
@receiver(post_save)
def post_save_handler(sender, instance, created, **kwargs):
if ContentType.objects.exists():
instance_type = ContentType.objects.get_for_model(instance.__class__)
if created:
push_new_content_for_queryset(queryset_type_pk=instance_type.pk,
queryset_pk=instance.pk)
else:
push_new_content_for_instance(instance_type_pk=instance_type.pk,
instance_pk=instance.pk)
@receiver(pre_delete)
def pre_delete_handler(sender, instance, **kwargs):
if ContentType.objects.exists():
instance_type = ContentType.objects.get_for_model(instance.__class__)
push_new_content_for_queryset(queryset_type_pk=instance_type.pk,
queryset_pk=instance.pk)
|
f14df4ae507f3161f00ac28648bd53f2bb0bd7c3 | collect_district_court_case_details.py | collect_district_court_case_details.py | import datetime
import pymongo
import os
import sys
import time
from courtreader import readers
# Connect to database
client = pymongo.MongoClient(os.environ['DISTRICT_DB'])
db = client.va_district_court_cases
# Connect to District Court Reader
reader = readers.DistrictCourtReader()
reader.connect()
# Fill in cases
while True:
case = db.cases.find_one({
'FIPSCode': sys.argv[1],
'date_collected': {'$exists': False}
})
if case is None: break
print case['CaseNumber']
case_details = reader.get_case_details_by_number( \
case['FIPSCode'], case['CaseNumber'])
case_details['date_collected'] = datetime.datetime.utcnow()
updated_case = dict(case.items() + case_details.items())
db.cases.replace_one({'_id': case['_id']}, updated_case)
time.sleep(2)
print 'Finished'
| import boto.utils
import datetime
import pymongo
import os
import sys
import time
import uuid
from courtreader import readers
# Connect to database
client = pymongo.MongoClient(os.environ['DISTRICT_DB'])
db = client.va_district_court_cases
# Connect to District Court Reader
reader = readers.DistrictCourtReader()
reader.connect()
# get some info about this process
process_id = str(uuid.uuid4())
cwd = os.getcwd()
ec2_id = None
try:
ec2_id = boto.utils.get_instance_metadata(timeout=1, num_retries=1)['instance-id']
except:
pass
# create db record for this process
db.scrapers.insert_one({
'process_id': process_id,
'cwd': cwd,
'ec2_id': ec2_id
})
fips_code = sys.argv[1]
# Fill in cases
while True:
case = db.cases.find_one({
'FIPSCode': fips_code,
'date_collected': {'$exists': False}
})
if case is None: break
print case['CaseNumber']
case_details = reader.get_case_details_by_number( \
case['FIPSCode'], case['CaseNumber'])
case_details['date_collected'] = datetime.datetime.utcnow()
updated_case = dict(case.items() + case_details.items())
db.cases.replace_one({'_id': case['_id']}, updated_case)
db.scrapers.update_one({'process_id': process_id}, {
'$set': {
'fips_code': fips_code,
'last_update': datetime.datetime.utcnow()
}
})
time.sleep(2)
db.scrapers.remove({'process_id': process_id})
db['completed_courts'].replace_one({'fips_code': fips_code}, {
'fips_code': fips_code,
'completed_time': datetime.datetime.utcnow()
}, upsert=True)
print 'Finished'
| Save scraper settings to database | Save scraper settings to database
This is the first step in allowing multiple processes to run on
different servers. Coming in the next commit!
| Python | mit | bschoenfeld/va-court-scraper,bschoenfeld/va-court-scraper | import datetime
import pymongo
import os
import sys
import time
from courtreader import readers
# Connect to database
client = pymongo.MongoClient(os.environ['DISTRICT_DB'])
db = client.va_district_court_cases
# Connect to District Court Reader
reader = readers.DistrictCourtReader()
reader.connect()
# Fill in cases
while True:
case = db.cases.find_one({
'FIPSCode': sys.argv[1],
'date_collected': {'$exists': False}
})
if case is None: break
print case['CaseNumber']
case_details = reader.get_case_details_by_number( \
case['FIPSCode'], case['CaseNumber'])
case_details['date_collected'] = datetime.datetime.utcnow()
updated_case = dict(case.items() + case_details.items())
db.cases.replace_one({'_id': case['_id']}, updated_case)
time.sleep(2)
print 'Finished'
Save scraper settings to database
This is the first step in allowing multiple processes to run on
different servers. Coming in the next commit! | import boto.utils
import datetime
import pymongo
import os
import sys
import time
import uuid
from courtreader import readers
# Connect to database
client = pymongo.MongoClient(os.environ['DISTRICT_DB'])
db = client.va_district_court_cases
# Connect to District Court Reader
reader = readers.DistrictCourtReader()
reader.connect()
# get some info about this process
process_id = str(uuid.uuid4())
cwd = os.getcwd()
ec2_id = None
try:
ec2_id = boto.utils.get_instance_metadata(timeout=1, num_retries=1)['instance-id']
except:
pass
# create db record for this process
db.scrapers.insert_one({
'process_id': process_id,
'cwd': cwd,
'ec2_id': ec2_id
})
fips_code = sys.argv[1]
# Fill in cases
while True:
case = db.cases.find_one({
'FIPSCode': fips_code,
'date_collected': {'$exists': False}
})
if case is None: break
print case['CaseNumber']
case_details = reader.get_case_details_by_number( \
case['FIPSCode'], case['CaseNumber'])
case_details['date_collected'] = datetime.datetime.utcnow()
updated_case = dict(case.items() + case_details.items())
db.cases.replace_one({'_id': case['_id']}, updated_case)
db.scrapers.update_one({'process_id': process_id}, {
'$set': {
'fips_code': fips_code,
'last_update': datetime.datetime.utcnow()
}
})
time.sleep(2)
db.scrapers.remove({'process_id': process_id})
db['completed_courts'].replace_one({'fips_code': fips_code}, {
'fips_code': fips_code,
'completed_time': datetime.datetime.utcnow()
}, upsert=True)
print 'Finished'
| <commit_before>import datetime
import pymongo
import os
import sys
import time
from courtreader import readers
# Connect to database
client = pymongo.MongoClient(os.environ['DISTRICT_DB'])
db = client.va_district_court_cases
# Connect to District Court Reader
reader = readers.DistrictCourtReader()
reader.connect()
# Fill in cases
while True:
case = db.cases.find_one({
'FIPSCode': sys.argv[1],
'date_collected': {'$exists': False}
})
if case is None: break
print case['CaseNumber']
case_details = reader.get_case_details_by_number( \
case['FIPSCode'], case['CaseNumber'])
case_details['date_collected'] = datetime.datetime.utcnow()
updated_case = dict(case.items() + case_details.items())
db.cases.replace_one({'_id': case['_id']}, updated_case)
time.sleep(2)
print 'Finished'
<commit_msg>Save scraper settings to database
This is the first step in allowing multiple processes to run on
different servers. Coming in the next commit!<commit_after> | import boto.utils
import datetime
import pymongo
import os
import sys
import time
import uuid
from courtreader import readers
# Connect to database
client = pymongo.MongoClient(os.environ['DISTRICT_DB'])
db = client.va_district_court_cases
# Connect to District Court Reader
reader = readers.DistrictCourtReader()
reader.connect()
# get some info about this process
process_id = str(uuid.uuid4())
cwd = os.getcwd()
ec2_id = None
try:
ec2_id = boto.utils.get_instance_metadata(timeout=1, num_retries=1)['instance-id']
except:
pass
# create db record for this process
db.scrapers.insert_one({
'process_id': process_id,
'cwd': cwd,
'ec2_id': ec2_id
})
fips_code = sys.argv[1]
# Fill in cases
while True:
case = db.cases.find_one({
'FIPSCode': fips_code,
'date_collected': {'$exists': False}
})
if case is None: break
print case['CaseNumber']
case_details = reader.get_case_details_by_number( \
case['FIPSCode'], case['CaseNumber'])
case_details['date_collected'] = datetime.datetime.utcnow()
updated_case = dict(case.items() + case_details.items())
db.cases.replace_one({'_id': case['_id']}, updated_case)
db.scrapers.update_one({'process_id': process_id}, {
'$set': {
'fips_code': fips_code,
'last_update': datetime.datetime.utcnow()
}
})
time.sleep(2)
db.scrapers.remove({'process_id': process_id})
db['completed_courts'].replace_one({'fips_code': fips_code}, {
'fips_code': fips_code,
'completed_time': datetime.datetime.utcnow()
}, upsert=True)
print 'Finished'
| import datetime
import pymongo
import os
import sys
import time
from courtreader import readers
# Connect to database
client = pymongo.MongoClient(os.environ['DISTRICT_DB'])
db = client.va_district_court_cases
# Connect to District Court Reader
reader = readers.DistrictCourtReader()
reader.connect()
# Fill in cases
while True:
case = db.cases.find_one({
'FIPSCode': sys.argv[1],
'date_collected': {'$exists': False}
})
if case is None: break
print case['CaseNumber']
case_details = reader.get_case_details_by_number( \
case['FIPSCode'], case['CaseNumber'])
case_details['date_collected'] = datetime.datetime.utcnow()
updated_case = dict(case.items() + case_details.items())
db.cases.replace_one({'_id': case['_id']}, updated_case)
time.sleep(2)
print 'Finished'
Save scraper settings to database
This is the first step in allowing multiple processes to run on
different servers. Coming in the next commit!import boto.utils
import datetime
import pymongo
import os
import sys
import time
import uuid
from courtreader import readers
# Connect to database
client = pymongo.MongoClient(os.environ['DISTRICT_DB'])
db = client.va_district_court_cases
# Connect to District Court Reader
reader = readers.DistrictCourtReader()
reader.connect()
# get some info about this process
process_id = str(uuid.uuid4())
cwd = os.getcwd()
ec2_id = None
try:
ec2_id = boto.utils.get_instance_metadata(timeout=1, num_retries=1)['instance-id']
except:
pass
# create db record for this process
db.scrapers.insert_one({
'process_id': process_id,
'cwd': cwd,
'ec2_id': ec2_id
})
fips_code = sys.argv[1]
# Fill in cases
while True:
case = db.cases.find_one({
'FIPSCode': fips_code,
'date_collected': {'$exists': False}
})
if case is None: break
print case['CaseNumber']
case_details = reader.get_case_details_by_number( \
case['FIPSCode'], case['CaseNumber'])
case_details['date_collected'] = datetime.datetime.utcnow()
updated_case = dict(case.items() + case_details.items())
db.cases.replace_one({'_id': case['_id']}, updated_case)
db.scrapers.update_one({'process_id': process_id}, {
'$set': {
'fips_code': fips_code,
'last_update': datetime.datetime.utcnow()
}
})
time.sleep(2)
db.scrapers.remove({'process_id': process_id})
db['completed_courts'].replace_one({'fips_code': fips_code}, {
'fips_code': fips_code,
'completed_time': datetime.datetime.utcnow()
}, upsert=True)
print 'Finished'
| <commit_before>import datetime
import pymongo
import os
import sys
import time
from courtreader import readers
# Connect to database
client = pymongo.MongoClient(os.environ['DISTRICT_DB'])
db = client.va_district_court_cases
# Connect to District Court Reader
reader = readers.DistrictCourtReader()
reader.connect()
# Fill in cases
while True:
case = db.cases.find_one({
'FIPSCode': sys.argv[1],
'date_collected': {'$exists': False}
})
if case is None: break
print case['CaseNumber']
case_details = reader.get_case_details_by_number( \
case['FIPSCode'], case['CaseNumber'])
case_details['date_collected'] = datetime.datetime.utcnow()
updated_case = dict(case.items() + case_details.items())
db.cases.replace_one({'_id': case['_id']}, updated_case)
time.sleep(2)
print 'Finished'
<commit_msg>Save scraper settings to database
This is the first step in allowing multiple processes to run on
different servers. Coming in the next commit!<commit_after>import boto.utils
import datetime
import pymongo
import os
import sys
import time
import uuid
from courtreader import readers
# Connect to database
client = pymongo.MongoClient(os.environ['DISTRICT_DB'])
db = client.va_district_court_cases
# Connect to District Court Reader
reader = readers.DistrictCourtReader()
reader.connect()
# get some info about this process
process_id = str(uuid.uuid4())
cwd = os.getcwd()
ec2_id = None
try:
ec2_id = boto.utils.get_instance_metadata(timeout=1, num_retries=1)['instance-id']
except:
pass
# create db record for this process
db.scrapers.insert_one({
'process_id': process_id,
'cwd': cwd,
'ec2_id': ec2_id
})
fips_code = sys.argv[1]
# Fill in cases
while True:
case = db.cases.find_one({
'FIPSCode': fips_code,
'date_collected': {'$exists': False}
})
if case is None: break
print case['CaseNumber']
case_details = reader.get_case_details_by_number( \
case['FIPSCode'], case['CaseNumber'])
case_details['date_collected'] = datetime.datetime.utcnow()
updated_case = dict(case.items() + case_details.items())
db.cases.replace_one({'_id': case['_id']}, updated_case)
db.scrapers.update_one({'process_id': process_id}, {
'$set': {
'fips_code': fips_code,
'last_update': datetime.datetime.utcnow()
}
})
time.sleep(2)
db.scrapers.remove({'process_id': process_id})
db['completed_courts'].replace_one({'fips_code': fips_code}, {
'fips_code': fips_code,
'completed_time': datetime.datetime.utcnow()
}, upsert=True)
print 'Finished'
|
1b70aee665720ce10e2e0437fb462745adbd6799 | changes/api/serializer/models/task.py | changes/api/serializer/models/task.py | from changes.api.serializer import Serializer, register
from changes.models import Task
@register(Task)
class TaskSerializer(Serializer):
def serialize(self, instance, attrs):
return {
'id': instance.id.hex,
'objectID': instance.task_id,
'parentObjectID': instance.parent_id,
'name': instance.task_name,
'args': instance.data.get('kwargs') or {},
'attempts': instance.num_retries + 1,
'status': instance.status,
'result': instance.result,
'dateCreated': instance.date_created,
'dateStarted': instance.date_started,
'dateFinished': instance.date_finished,
'dateModified': instance.date_modified,
}
| from changes.api.serializer import Serializer, register
from changes.models import Task
@register(Task)
class TaskSerializer(Serializer):
def serialize(self, instance, attrs):
if instance.data:
args = instance.data.get('kwargs') or {}
else:
args = {}
return {
'id': instance.id.hex,
'objectID': instance.task_id,
'parentObjectID': instance.parent_id,
'name': instance.task_name,
'args': args,
'attempts': instance.num_retries + 1,
'status': instance.status,
'result': instance.result,
'dateCreated': instance.date_created,
'dateStarted': instance.date_started,
'dateFinished': instance.date_finished,
'dateModified': instance.date_modified,
}
| Fix args when Task.data is empty | Fix args when Task.data is empty
| Python | apache-2.0 | bowlofstew/changes,bowlofstew/changes,bowlofstew/changes,dropbox/changes,wfxiang08/changes,dropbox/changes,bowlofstew/changes,wfxiang08/changes,dropbox/changes,wfxiang08/changes,wfxiang08/changes,dropbox/changes | from changes.api.serializer import Serializer, register
from changes.models import Task
@register(Task)
class TaskSerializer(Serializer):
def serialize(self, instance, attrs):
return {
'id': instance.id.hex,
'objectID': instance.task_id,
'parentObjectID': instance.parent_id,
'name': instance.task_name,
'args': instance.data.get('kwargs') or {},
'attempts': instance.num_retries + 1,
'status': instance.status,
'result': instance.result,
'dateCreated': instance.date_created,
'dateStarted': instance.date_started,
'dateFinished': instance.date_finished,
'dateModified': instance.date_modified,
}
Fix args when Task.data is empty | from changes.api.serializer import Serializer, register
from changes.models import Task
@register(Task)
class TaskSerializer(Serializer):
def serialize(self, instance, attrs):
if instance.data:
args = instance.data.get('kwargs') or {}
else:
args = {}
return {
'id': instance.id.hex,
'objectID': instance.task_id,
'parentObjectID': instance.parent_id,
'name': instance.task_name,
'args': args,
'attempts': instance.num_retries + 1,
'status': instance.status,
'result': instance.result,
'dateCreated': instance.date_created,
'dateStarted': instance.date_started,
'dateFinished': instance.date_finished,
'dateModified': instance.date_modified,
}
| <commit_before>from changes.api.serializer import Serializer, register
from changes.models import Task
@register(Task)
class TaskSerializer(Serializer):
def serialize(self, instance, attrs):
return {
'id': instance.id.hex,
'objectID': instance.task_id,
'parentObjectID': instance.parent_id,
'name': instance.task_name,
'args': instance.data.get('kwargs') or {},
'attempts': instance.num_retries + 1,
'status': instance.status,
'result': instance.result,
'dateCreated': instance.date_created,
'dateStarted': instance.date_started,
'dateFinished': instance.date_finished,
'dateModified': instance.date_modified,
}
<commit_msg>Fix args when Task.data is empty<commit_after> | from changes.api.serializer import Serializer, register
from changes.models import Task
@register(Task)
class TaskSerializer(Serializer):
def serialize(self, instance, attrs):
if instance.data:
args = instance.data.get('kwargs') or {}
else:
args = {}
return {
'id': instance.id.hex,
'objectID': instance.task_id,
'parentObjectID': instance.parent_id,
'name': instance.task_name,
'args': args,
'attempts': instance.num_retries + 1,
'status': instance.status,
'result': instance.result,
'dateCreated': instance.date_created,
'dateStarted': instance.date_started,
'dateFinished': instance.date_finished,
'dateModified': instance.date_modified,
}
| from changes.api.serializer import Serializer, register
from changes.models import Task
@register(Task)
class TaskSerializer(Serializer):
def serialize(self, instance, attrs):
return {
'id': instance.id.hex,
'objectID': instance.task_id,
'parentObjectID': instance.parent_id,
'name': instance.task_name,
'args': instance.data.get('kwargs') or {},
'attempts': instance.num_retries + 1,
'status': instance.status,
'result': instance.result,
'dateCreated': instance.date_created,
'dateStarted': instance.date_started,
'dateFinished': instance.date_finished,
'dateModified': instance.date_modified,
}
Fix args when Task.data is emptyfrom changes.api.serializer import Serializer, register
from changes.models import Task
@register(Task)
class TaskSerializer(Serializer):
def serialize(self, instance, attrs):
if instance.data:
args = instance.data.get('kwargs') or {}
else:
args = {}
return {
'id': instance.id.hex,
'objectID': instance.task_id,
'parentObjectID': instance.parent_id,
'name': instance.task_name,
'args': args,
'attempts': instance.num_retries + 1,
'status': instance.status,
'result': instance.result,
'dateCreated': instance.date_created,
'dateStarted': instance.date_started,
'dateFinished': instance.date_finished,
'dateModified': instance.date_modified,
}
| <commit_before>from changes.api.serializer import Serializer, register
from changes.models import Task
@register(Task)
class TaskSerializer(Serializer):
def serialize(self, instance, attrs):
return {
'id': instance.id.hex,
'objectID': instance.task_id,
'parentObjectID': instance.parent_id,
'name': instance.task_name,
'args': instance.data.get('kwargs') or {},
'attempts': instance.num_retries + 1,
'status': instance.status,
'result': instance.result,
'dateCreated': instance.date_created,
'dateStarted': instance.date_started,
'dateFinished': instance.date_finished,
'dateModified': instance.date_modified,
}
<commit_msg>Fix args when Task.data is empty<commit_after>from changes.api.serializer import Serializer, register
from changes.models import Task
@register(Task)
class TaskSerializer(Serializer):
def serialize(self, instance, attrs):
if instance.data:
args = instance.data.get('kwargs') or {}
else:
args = {}
return {
'id': instance.id.hex,
'objectID': instance.task_id,
'parentObjectID': instance.parent_id,
'name': instance.task_name,
'args': args,
'attempts': instance.num_retries + 1,
'status': instance.status,
'result': instance.result,
'dateCreated': instance.date_created,
'dateStarted': instance.date_started,
'dateFinished': instance.date_finished,
'dateModified': instance.date_modified,
}
|
33e17ebd7046e0968db2ee01f9e2759a382b62d9 | packages/pango.py | packages/pango.py | class PangoPackage (GnomePackage):
def __init__ (self):
GnomePackage.__init__ (self,
'pango',
version_major = '1.28',
version_minor = '4',
configure_flags = [
'--without-x'
]
)
self.sources.extend ([
# patch from bgo#321419
'http://bugzilla-attachments.gnome.org/attachment.cgi?id=96023'
])
def prep (self):
GnomePackage.prep (self)
self.sh ('patch -p0 < "%{sources[1]}"')
PangoPackage ()
| class PangoPackage (GnomePackage):
def __init__ (self):
GnomePackage.__init__ (self,
'pango',
version_major = '1.29',
version_minor = '4',
configure_flags = [
'--without-x'
]
)
self.sources.extend ([
# patch from bgo#321419
'http://bugzilla-attachments.gnome.org/attachment.cgi?id=96023',
# CoreText: stricter handling of FontSymbolic traits
'http://git.gnome.org/browse/pango/patch/?id=cce4c9f84350bb53371323ab96ccf9245e014f75',
# Get _pango_get_lc_ctype from system prefs on Mac OS X
'http://git.gnome.org/browse/pango/patch/?id=c21b1bfe1278de08673c495ba398fbdee874a778',
# Bug 647969 - CoreText backend needs proper font fallback/coverage support
# https://bugzilla.gnome.org/show_bug.cgi?id=647969
'http://bugzilla-attachments.gnome.org/attachment.cgi?id=201356',
])
def prep (self):
GnomePackage.prep (self)
self.sh ('patch -p0 < "%{sources[1]}"')
if Package.profile.name == 'darwin':
for p in range (2, len (self.sources)):
self.sh ('patch -p1 < "%{sources[' + str (p) + ']}"')
PangoPackage ()
| Update Pango to use CoreText | Update Pango to use CoreText | Python | mit | bl8/bockbuild,bl8/bockbuild,BansheeMediaPlayer/bockbuild,mono/bockbuild,mono/bockbuild,BansheeMediaPlayer/bockbuild,BansheeMediaPlayer/bockbuild,bl8/bockbuild | class PangoPackage (GnomePackage):
def __init__ (self):
GnomePackage.__init__ (self,
'pango',
version_major = '1.28',
version_minor = '4',
configure_flags = [
'--without-x'
]
)
self.sources.extend ([
# patch from bgo#321419
'http://bugzilla-attachments.gnome.org/attachment.cgi?id=96023'
])
def prep (self):
GnomePackage.prep (self)
self.sh ('patch -p0 < "%{sources[1]}"')
PangoPackage ()
Update Pango to use CoreText | class PangoPackage (GnomePackage):
def __init__ (self):
GnomePackage.__init__ (self,
'pango',
version_major = '1.29',
version_minor = '4',
configure_flags = [
'--without-x'
]
)
self.sources.extend ([
# patch from bgo#321419
'http://bugzilla-attachments.gnome.org/attachment.cgi?id=96023',
# CoreText: stricter handling of FontSymbolic traits
'http://git.gnome.org/browse/pango/patch/?id=cce4c9f84350bb53371323ab96ccf9245e014f75',
# Get _pango_get_lc_ctype from system prefs on Mac OS X
'http://git.gnome.org/browse/pango/patch/?id=c21b1bfe1278de08673c495ba398fbdee874a778',
# Bug 647969 - CoreText backend needs proper font fallback/coverage support
# https://bugzilla.gnome.org/show_bug.cgi?id=647969
'http://bugzilla-attachments.gnome.org/attachment.cgi?id=201356',
])
def prep (self):
GnomePackage.prep (self)
self.sh ('patch -p0 < "%{sources[1]}"')
if Package.profile.name == 'darwin':
for p in range (2, len (self.sources)):
self.sh ('patch -p1 < "%{sources[' + str (p) + ']}"')
PangoPackage ()
| <commit_before>class PangoPackage (GnomePackage):
def __init__ (self):
GnomePackage.__init__ (self,
'pango',
version_major = '1.28',
version_minor = '4',
configure_flags = [
'--without-x'
]
)
self.sources.extend ([
# patch from bgo#321419
'http://bugzilla-attachments.gnome.org/attachment.cgi?id=96023'
])
def prep (self):
GnomePackage.prep (self)
self.sh ('patch -p0 < "%{sources[1]}"')
PangoPackage ()
<commit_msg>Update Pango to use CoreText<commit_after> | class PangoPackage (GnomePackage):
def __init__ (self):
GnomePackage.__init__ (self,
'pango',
version_major = '1.29',
version_minor = '4',
configure_flags = [
'--without-x'
]
)
self.sources.extend ([
# patch from bgo#321419
'http://bugzilla-attachments.gnome.org/attachment.cgi?id=96023',
# CoreText: stricter handling of FontSymbolic traits
'http://git.gnome.org/browse/pango/patch/?id=cce4c9f84350bb53371323ab96ccf9245e014f75',
# Get _pango_get_lc_ctype from system prefs on Mac OS X
'http://git.gnome.org/browse/pango/patch/?id=c21b1bfe1278de08673c495ba398fbdee874a778',
# Bug 647969 - CoreText backend needs proper font fallback/coverage support
# https://bugzilla.gnome.org/show_bug.cgi?id=647969
'http://bugzilla-attachments.gnome.org/attachment.cgi?id=201356',
])
def prep (self):
GnomePackage.prep (self)
self.sh ('patch -p0 < "%{sources[1]}"')
if Package.profile.name == 'darwin':
for p in range (2, len (self.sources)):
self.sh ('patch -p1 < "%{sources[' + str (p) + ']}"')
PangoPackage ()
| class PangoPackage (GnomePackage):
def __init__ (self):
GnomePackage.__init__ (self,
'pango',
version_major = '1.28',
version_minor = '4',
configure_flags = [
'--without-x'
]
)
self.sources.extend ([
# patch from bgo#321419
'http://bugzilla-attachments.gnome.org/attachment.cgi?id=96023'
])
def prep (self):
GnomePackage.prep (self)
self.sh ('patch -p0 < "%{sources[1]}"')
PangoPackage ()
Update Pango to use CoreTextclass PangoPackage (GnomePackage):
def __init__ (self):
GnomePackage.__init__ (self,
'pango',
version_major = '1.29',
version_minor = '4',
configure_flags = [
'--without-x'
]
)
self.sources.extend ([
# patch from bgo#321419
'http://bugzilla-attachments.gnome.org/attachment.cgi?id=96023',
# CoreText: stricter handling of FontSymbolic traits
'http://git.gnome.org/browse/pango/patch/?id=cce4c9f84350bb53371323ab96ccf9245e014f75',
# Get _pango_get_lc_ctype from system prefs on Mac OS X
'http://git.gnome.org/browse/pango/patch/?id=c21b1bfe1278de08673c495ba398fbdee874a778',
# Bug 647969 - CoreText backend needs proper font fallback/coverage support
# https://bugzilla.gnome.org/show_bug.cgi?id=647969
'http://bugzilla-attachments.gnome.org/attachment.cgi?id=201356',
])
def prep (self):
GnomePackage.prep (self)
self.sh ('patch -p0 < "%{sources[1]}"')
if Package.profile.name == 'darwin':
for p in range (2, len (self.sources)):
self.sh ('patch -p1 < "%{sources[' + str (p) + ']}"')
PangoPackage ()
| <commit_before>class PangoPackage (GnomePackage):
def __init__ (self):
GnomePackage.__init__ (self,
'pango',
version_major = '1.28',
version_minor = '4',
configure_flags = [
'--without-x'
]
)
self.sources.extend ([
# patch from bgo#321419
'http://bugzilla-attachments.gnome.org/attachment.cgi?id=96023'
])
def prep (self):
GnomePackage.prep (self)
self.sh ('patch -p0 < "%{sources[1]}"')
PangoPackage ()
<commit_msg>Update Pango to use CoreText<commit_after>class PangoPackage (GnomePackage):
def __init__ (self):
GnomePackage.__init__ (self,
'pango',
version_major = '1.29',
version_minor = '4',
configure_flags = [
'--without-x'
]
)
self.sources.extend ([
# patch from bgo#321419
'http://bugzilla-attachments.gnome.org/attachment.cgi?id=96023',
# CoreText: stricter handling of FontSymbolic traits
'http://git.gnome.org/browse/pango/patch/?id=cce4c9f84350bb53371323ab96ccf9245e014f75',
# Get _pango_get_lc_ctype from system prefs on Mac OS X
'http://git.gnome.org/browse/pango/patch/?id=c21b1bfe1278de08673c495ba398fbdee874a778',
# Bug 647969 - CoreText backend needs proper font fallback/coverage support
# https://bugzilla.gnome.org/show_bug.cgi?id=647969
'http://bugzilla-attachments.gnome.org/attachment.cgi?id=201356',
])
def prep (self):
GnomePackage.prep (self)
self.sh ('patch -p0 < "%{sources[1]}"')
if Package.profile.name == 'darwin':
for p in range (2, len (self.sources)):
self.sh ('patch -p1 < "%{sources[' + str (p) + ']}"')
PangoPackage ()
|
0372de11f91e0018eb122f9a6543ecf7cc9e086b | parliament/search/management/commands/consume_indexing_queue.py | parliament/search/management/commands/consume_indexing_queue.py | import itertools
import logging
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from haystack import site
import pysolr
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Runs any queued-up search indexing tasks."
def handle(self, **options):
from parliament.search.models import IndexingTask
delete_tasks = list(
IndexingTask.objects.filter(action='delete')
)
update_tasks = list(
IndexingTask.objects.filter(action='update').prefetch_related('content_object')
)
if update_tasks:
update_objs = [t.content_object for t in update_tasks if t.content_object]
update_objs.sort(key=lambda o: o.__class__.__name__)
for cls, objs in itertools.groupby(update_objs, lambda o: o.__class__):
print "Indexing %s" % cls
index = site.get_index(cls)
index.backend.update(index, list(objs))
IndexingTask.objects.filter(id__in=[t.id for t in update_tasks]).delete()
if delete_tasks:
solr = pysolr.Solr(settings.HAYSTACK_SOLR_URL)
for dt in delete_tasks:
print "Deleting %s" % dt.identifier
solr.delete(id=dt.identifier, commit=False)
solr.commit()
IndexingTask.objects.filter(id__in=[t.id for t in delete_tasks]).delete()
| import itertools
import logging
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from haystack import site
import pysolr
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Runs any queued-up search indexing tasks."
def handle(self, **options):
from parliament.search.models import IndexingTask
delete_tasks = list(
IndexingTask.objects.filter(action='delete')
)
update_tasks = list(
IndexingTask.objects.filter(action='update').prefetch_related('content_object')
)
solr = pysolr.Solr(settings.HAYSTACK_SOLR_URL)
if update_tasks:
update_objs = [t.content_object for t in update_tasks if t.content_object]
update_objs.sort(key=lambda o: o.__class__.__name__)
for cls, objs in itertools.groupby(update_objs, lambda o: o.__class__):
print "Indexing %s" % cls
index = site.get_index(cls)
prepared_objs = [index.prepare(o) for o in objs]
solr.add(prepared_objs)
IndexingTask.objects.filter(id__in=[t.id for t in update_tasks]).delete()
if delete_tasks:
for dt in delete_tasks:
print "Deleting %s" % dt.identifier
solr.delete(id=dt.identifier, commit=False)
solr.commit()
IndexingTask.objects.filter(id__in=[t.id for t in delete_tasks]).delete()
| Use pysolr instead of haystack in indexing job | Use pysolr instead of haystack in indexing job
| Python | agpl-3.0 | twhyte/openparliament,rhymeswithcycle/openparliament,litui/openparliament,rhymeswithcycle/openparliament,litui/openparliament,rhymeswithcycle/openparliament,litui/openparliament,twhyte/openparliament,twhyte/openparliament | import itertools
import logging
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from haystack import site
import pysolr
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Runs any queued-up search indexing tasks."
def handle(self, **options):
from parliament.search.models import IndexingTask
delete_tasks = list(
IndexingTask.objects.filter(action='delete')
)
update_tasks = list(
IndexingTask.objects.filter(action='update').prefetch_related('content_object')
)
if update_tasks:
update_objs = [t.content_object for t in update_tasks if t.content_object]
update_objs.sort(key=lambda o: o.__class__.__name__)
for cls, objs in itertools.groupby(update_objs, lambda o: o.__class__):
print "Indexing %s" % cls
index = site.get_index(cls)
index.backend.update(index, list(objs))
IndexingTask.objects.filter(id__in=[t.id for t in update_tasks]).delete()
if delete_tasks:
solr = pysolr.Solr(settings.HAYSTACK_SOLR_URL)
for dt in delete_tasks:
print "Deleting %s" % dt.identifier
solr.delete(id=dt.identifier, commit=False)
solr.commit()
IndexingTask.objects.filter(id__in=[t.id for t in delete_tasks]).delete()
Use pysolr instead of haystack in indexing job | import itertools
import logging
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from haystack import site
import pysolr
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Runs any queued-up search indexing tasks."
def handle(self, **options):
from parliament.search.models import IndexingTask
delete_tasks = list(
IndexingTask.objects.filter(action='delete')
)
update_tasks = list(
IndexingTask.objects.filter(action='update').prefetch_related('content_object')
)
solr = pysolr.Solr(settings.HAYSTACK_SOLR_URL)
if update_tasks:
update_objs = [t.content_object for t in update_tasks if t.content_object]
update_objs.sort(key=lambda o: o.__class__.__name__)
for cls, objs in itertools.groupby(update_objs, lambda o: o.__class__):
print "Indexing %s" % cls
index = site.get_index(cls)
prepared_objs = [index.prepare(o) for o in objs]
solr.add(prepared_objs)
IndexingTask.objects.filter(id__in=[t.id for t in update_tasks]).delete()
if delete_tasks:
for dt in delete_tasks:
print "Deleting %s" % dt.identifier
solr.delete(id=dt.identifier, commit=False)
solr.commit()
IndexingTask.objects.filter(id__in=[t.id for t in delete_tasks]).delete()
| <commit_before>import itertools
import logging
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from haystack import site
import pysolr
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Runs any queued-up search indexing tasks."
def handle(self, **options):
from parliament.search.models import IndexingTask
delete_tasks = list(
IndexingTask.objects.filter(action='delete')
)
update_tasks = list(
IndexingTask.objects.filter(action='update').prefetch_related('content_object')
)
if update_tasks:
update_objs = [t.content_object for t in update_tasks if t.content_object]
update_objs.sort(key=lambda o: o.__class__.__name__)
for cls, objs in itertools.groupby(update_objs, lambda o: o.__class__):
print "Indexing %s" % cls
index = site.get_index(cls)
index.backend.update(index, list(objs))
IndexingTask.objects.filter(id__in=[t.id for t in update_tasks]).delete()
if delete_tasks:
solr = pysolr.Solr(settings.HAYSTACK_SOLR_URL)
for dt in delete_tasks:
print "Deleting %s" % dt.identifier
solr.delete(id=dt.identifier, commit=False)
solr.commit()
IndexingTask.objects.filter(id__in=[t.id for t in delete_tasks]).delete()
<commit_msg>Use pysolr instead of haystack in indexing job<commit_after> | import itertools
import logging
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from haystack import site
import pysolr
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Runs any queued-up search indexing tasks."
def handle(self, **options):
from parliament.search.models import IndexingTask
delete_tasks = list(
IndexingTask.objects.filter(action='delete')
)
update_tasks = list(
IndexingTask.objects.filter(action='update').prefetch_related('content_object')
)
solr = pysolr.Solr(settings.HAYSTACK_SOLR_URL)
if update_tasks:
update_objs = [t.content_object for t in update_tasks if t.content_object]
update_objs.sort(key=lambda o: o.__class__.__name__)
for cls, objs in itertools.groupby(update_objs, lambda o: o.__class__):
print "Indexing %s" % cls
index = site.get_index(cls)
prepared_objs = [index.prepare(o) for o in objs]
solr.add(prepared_objs)
IndexingTask.objects.filter(id__in=[t.id for t in update_tasks]).delete()
if delete_tasks:
for dt in delete_tasks:
print "Deleting %s" % dt.identifier
solr.delete(id=dt.identifier, commit=False)
solr.commit()
IndexingTask.objects.filter(id__in=[t.id for t in delete_tasks]).delete()
| import itertools
import logging
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from haystack import site
import pysolr
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Runs any queued-up search indexing tasks."
def handle(self, **options):
from parliament.search.models import IndexingTask
delete_tasks = list(
IndexingTask.objects.filter(action='delete')
)
update_tasks = list(
IndexingTask.objects.filter(action='update').prefetch_related('content_object')
)
if update_tasks:
update_objs = [t.content_object for t in update_tasks if t.content_object]
update_objs.sort(key=lambda o: o.__class__.__name__)
for cls, objs in itertools.groupby(update_objs, lambda o: o.__class__):
print "Indexing %s" % cls
index = site.get_index(cls)
index.backend.update(index, list(objs))
IndexingTask.objects.filter(id__in=[t.id for t in update_tasks]).delete()
if delete_tasks:
solr = pysolr.Solr(settings.HAYSTACK_SOLR_URL)
for dt in delete_tasks:
print "Deleting %s" % dt.identifier
solr.delete(id=dt.identifier, commit=False)
solr.commit()
IndexingTask.objects.filter(id__in=[t.id for t in delete_tasks]).delete()
Use pysolr instead of haystack in indexing jobimport itertools
import logging
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from haystack import site
import pysolr
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Runs any queued-up search indexing tasks."
def handle(self, **options):
from parliament.search.models import IndexingTask
delete_tasks = list(
IndexingTask.objects.filter(action='delete')
)
update_tasks = list(
IndexingTask.objects.filter(action='update').prefetch_related('content_object')
)
solr = pysolr.Solr(settings.HAYSTACK_SOLR_URL)
if update_tasks:
update_objs = [t.content_object for t in update_tasks if t.content_object]
update_objs.sort(key=lambda o: o.__class__.__name__)
for cls, objs in itertools.groupby(update_objs, lambda o: o.__class__):
print "Indexing %s" % cls
index = site.get_index(cls)
prepared_objs = [index.prepare(o) for o in objs]
solr.add(prepared_objs)
IndexingTask.objects.filter(id__in=[t.id for t in update_tasks]).delete()
if delete_tasks:
for dt in delete_tasks:
print "Deleting %s" % dt.identifier
solr.delete(id=dt.identifier, commit=False)
solr.commit()
IndexingTask.objects.filter(id__in=[t.id for t in delete_tasks]).delete()
| <commit_before>import itertools
import logging
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from haystack import site
import pysolr
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Runs any queued-up search indexing tasks."
def handle(self, **options):
from parliament.search.models import IndexingTask
delete_tasks = list(
IndexingTask.objects.filter(action='delete')
)
update_tasks = list(
IndexingTask.objects.filter(action='update').prefetch_related('content_object')
)
if update_tasks:
update_objs = [t.content_object for t in update_tasks if t.content_object]
update_objs.sort(key=lambda o: o.__class__.__name__)
for cls, objs in itertools.groupby(update_objs, lambda o: o.__class__):
print "Indexing %s" % cls
index = site.get_index(cls)
index.backend.update(index, list(objs))
IndexingTask.objects.filter(id__in=[t.id for t in update_tasks]).delete()
if delete_tasks:
solr = pysolr.Solr(settings.HAYSTACK_SOLR_URL)
for dt in delete_tasks:
print "Deleting %s" % dt.identifier
solr.delete(id=dt.identifier, commit=False)
solr.commit()
IndexingTask.objects.filter(id__in=[t.id for t in delete_tasks]).delete()
<commit_msg>Use pysolr instead of haystack in indexing job<commit_after>import itertools
import logging
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from haystack import site
import pysolr
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Runs any queued-up search indexing tasks."
def handle(self, **options):
from parliament.search.models import IndexingTask
delete_tasks = list(
IndexingTask.objects.filter(action='delete')
)
update_tasks = list(
IndexingTask.objects.filter(action='update').prefetch_related('content_object')
)
solr = pysolr.Solr(settings.HAYSTACK_SOLR_URL)
if update_tasks:
update_objs = [t.content_object for t in update_tasks if t.content_object]
update_objs.sort(key=lambda o: o.__class__.__name__)
for cls, objs in itertools.groupby(update_objs, lambda o: o.__class__):
print "Indexing %s" % cls
index = site.get_index(cls)
prepared_objs = [index.prepare(o) for o in objs]
solr.add(prepared_objs)
IndexingTask.objects.filter(id__in=[t.id for t in update_tasks]).delete()
if delete_tasks:
for dt in delete_tasks:
print "Deleting %s" % dt.identifier
solr.delete(id=dt.identifier, commit=False)
solr.commit()
IndexingTask.objects.filter(id__in=[t.id for t in delete_tasks]).delete()
|
ee82b77f562ee1d49c2fc724a3fc58b101c0dd2b | src/devilry_qualifiesforexam/devilry_qualifiesforexam/urls.py | src/devilry_qualifiesforexam/devilry_qualifiesforexam/urls.py | from django.conf.urls import patterns, url, include
from django.contrib.auth.decorators import login_required
from django.views.i18n import javascript_catalog
from django.views.decorators.csrf import csrf_protect, ensure_csrf_cookie
from devilry_settings.i18n import get_javascript_catalog_packages
from .views import AppView
i18n_packages = get_javascript_catalog_packages('devilry_header', 'devilry.apps.core')
urlpatterns = patterns('devilry_qualifiesforexam',
url('^rest/', include('devilry_qualifiesforexam.rest.urls')),
url('^wizard/(?P<periodid>\d+)/$',
login_required(csrf_protect(ensure_csrf_cookie(AppView.as_view()))),
name='devilry_qualifiesforexam_ui'),
url('^i18n.js$', javascript_catalog, kwargs={'packages': i18n_packages},
name='devilry_qualifiesforexam_i18n')
)
| from django.conf.urls import patterns, url, include
from django.contrib.auth.decorators import login_required
from django.views.i18n import javascript_catalog
from django.views.decorators.csrf import csrf_protect, ensure_csrf_cookie
from devilry_settings.i18n import get_javascript_catalog_packages
from .views import AppView
i18n_packages = get_javascript_catalog_packages('devilry_header', 'devilry.apps.core')
urlpatterns = patterns('devilry_qualifiesforexam',
url('^rest/', include('devilry_qualifiesforexam.rest.urls')),
url('^$', login_required(csrf_protect(ensure_csrf_cookie(AppView.as_view()))),
name='devilry_qualifiesforexam_ui'),
url('^i18n.js$', javascript_catalog, kwargs={'packages': i18n_packages},
name='devilry_qualifiesforexam_i18n')
)
| Remove period id from app url. | devilry_qualfiesforexam: Remove period id from app url.
| Python | bsd-3-clause | devilry/devilry-django,devilry/devilry-django,devilry/devilry-django,devilry/devilry-django | from django.conf.urls import patterns, url, include
from django.contrib.auth.decorators import login_required
from django.views.i18n import javascript_catalog
from django.views.decorators.csrf import csrf_protect, ensure_csrf_cookie
from devilry_settings.i18n import get_javascript_catalog_packages
from .views import AppView
i18n_packages = get_javascript_catalog_packages('devilry_header', 'devilry.apps.core')
urlpatterns = patterns('devilry_qualifiesforexam',
url('^rest/', include('devilry_qualifiesforexam.rest.urls')),
url('^wizard/(?P<periodid>\d+)/$',
login_required(csrf_protect(ensure_csrf_cookie(AppView.as_view()))),
name='devilry_qualifiesforexam_ui'),
url('^i18n.js$', javascript_catalog, kwargs={'packages': i18n_packages},
name='devilry_qualifiesforexam_i18n')
)
devilry_qualfiesforexam: Remove period id from app url. | from django.conf.urls import patterns, url, include
from django.contrib.auth.decorators import login_required
from django.views.i18n import javascript_catalog
from django.views.decorators.csrf import csrf_protect, ensure_csrf_cookie
from devilry_settings.i18n import get_javascript_catalog_packages
from .views import AppView
i18n_packages = get_javascript_catalog_packages('devilry_header', 'devilry.apps.core')
urlpatterns = patterns('devilry_qualifiesforexam',
url('^rest/', include('devilry_qualifiesforexam.rest.urls')),
url('^$', login_required(csrf_protect(ensure_csrf_cookie(AppView.as_view()))),
name='devilry_qualifiesforexam_ui'),
url('^i18n.js$', javascript_catalog, kwargs={'packages': i18n_packages},
name='devilry_qualifiesforexam_i18n')
)
| <commit_before>from django.conf.urls import patterns, url, include
from django.contrib.auth.decorators import login_required
from django.views.i18n import javascript_catalog
from django.views.decorators.csrf import csrf_protect, ensure_csrf_cookie
from devilry_settings.i18n import get_javascript_catalog_packages
from .views import AppView
i18n_packages = get_javascript_catalog_packages('devilry_header', 'devilry.apps.core')
urlpatterns = patterns('devilry_qualifiesforexam',
url('^rest/', include('devilry_qualifiesforexam.rest.urls')),
url('^wizard/(?P<periodid>\d+)/$',
login_required(csrf_protect(ensure_csrf_cookie(AppView.as_view()))),
name='devilry_qualifiesforexam_ui'),
url('^i18n.js$', javascript_catalog, kwargs={'packages': i18n_packages},
name='devilry_qualifiesforexam_i18n')
)
<commit_msg>devilry_qualfiesforexam: Remove period id from app url.<commit_after> | from django.conf.urls import patterns, url, include
from django.contrib.auth.decorators import login_required
from django.views.i18n import javascript_catalog
from django.views.decorators.csrf import csrf_protect, ensure_csrf_cookie
from devilry_settings.i18n import get_javascript_catalog_packages
from .views import AppView
i18n_packages = get_javascript_catalog_packages('devilry_header', 'devilry.apps.core')
urlpatterns = patterns('devilry_qualifiesforexam',
url('^rest/', include('devilry_qualifiesforexam.rest.urls')),
url('^$', login_required(csrf_protect(ensure_csrf_cookie(AppView.as_view()))),
name='devilry_qualifiesforexam_ui'),
url('^i18n.js$', javascript_catalog, kwargs={'packages': i18n_packages},
name='devilry_qualifiesforexam_i18n')
)
| from django.conf.urls import patterns, url, include
from django.contrib.auth.decorators import login_required
from django.views.i18n import javascript_catalog
from django.views.decorators.csrf import csrf_protect, ensure_csrf_cookie
from devilry_settings.i18n import get_javascript_catalog_packages
from .views import AppView
i18n_packages = get_javascript_catalog_packages('devilry_header', 'devilry.apps.core')
urlpatterns = patterns('devilry_qualifiesforexam',
url('^rest/', include('devilry_qualifiesforexam.rest.urls')),
url('^wizard/(?P<periodid>\d+)/$',
login_required(csrf_protect(ensure_csrf_cookie(AppView.as_view()))),
name='devilry_qualifiesforexam_ui'),
url('^i18n.js$', javascript_catalog, kwargs={'packages': i18n_packages},
name='devilry_qualifiesforexam_i18n')
)
devilry_qualfiesforexam: Remove period id from app url.from django.conf.urls import patterns, url, include
from django.contrib.auth.decorators import login_required
from django.views.i18n import javascript_catalog
from django.views.decorators.csrf import csrf_protect, ensure_csrf_cookie
from devilry_settings.i18n import get_javascript_catalog_packages
from .views import AppView
i18n_packages = get_javascript_catalog_packages('devilry_header', 'devilry.apps.core')
urlpatterns = patterns('devilry_qualifiesforexam',
url('^rest/', include('devilry_qualifiesforexam.rest.urls')),
url('^$', login_required(csrf_protect(ensure_csrf_cookie(AppView.as_view()))),
name='devilry_qualifiesforexam_ui'),
url('^i18n.js$', javascript_catalog, kwargs={'packages': i18n_packages},
name='devilry_qualifiesforexam_i18n')
)
| <commit_before>from django.conf.urls import patterns, url, include
from django.contrib.auth.decorators import login_required
from django.views.i18n import javascript_catalog
from django.views.decorators.csrf import csrf_protect, ensure_csrf_cookie
from devilry_settings.i18n import get_javascript_catalog_packages
from .views import AppView
i18n_packages = get_javascript_catalog_packages('devilry_header', 'devilry.apps.core')
urlpatterns = patterns('devilry_qualifiesforexam',
url('^rest/', include('devilry_qualifiesforexam.rest.urls')),
url('^wizard/(?P<periodid>\d+)/$',
login_required(csrf_protect(ensure_csrf_cookie(AppView.as_view()))),
name='devilry_qualifiesforexam_ui'),
url('^i18n.js$', javascript_catalog, kwargs={'packages': i18n_packages},
name='devilry_qualifiesforexam_i18n')
)
<commit_msg>devilry_qualfiesforexam: Remove period id from app url.<commit_after>from django.conf.urls import patterns, url, include
from django.contrib.auth.decorators import login_required
from django.views.i18n import javascript_catalog
from django.views.decorators.csrf import csrf_protect, ensure_csrf_cookie
from devilry_settings.i18n import get_javascript_catalog_packages
from .views import AppView
i18n_packages = get_javascript_catalog_packages('devilry_header', 'devilry.apps.core')
urlpatterns = patterns('devilry_qualifiesforexam',
url('^rest/', include('devilry_qualifiesforexam.rest.urls')),
url('^$', login_required(csrf_protect(ensure_csrf_cookie(AppView.as_view()))),
name='devilry_qualifiesforexam_ui'),
url('^i18n.js$', javascript_catalog, kwargs={'packages': i18n_packages},
name='devilry_qualifiesforexam_i18n')
)
|
edbcfbf83ab79fff7de00c7a6310c9fceb17df91 | accelerator/migrations/0099_update_program_model.py | accelerator/migrations/0099_update_program_model.py | # Generated by Django 2.2.28 on 2022-04-20 13:05
from django.db import (
migrations,
models,
)
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0098_update_startup_update_20220408_0441'),
]
operations = [
migrations.AddField(
model_name='program',
name='hubspot_url',
field=models.URLField(blank=True, null=True),
),
migrations.AddField(
model_name='program',
name='program_image',
field=models.ImageField(null=True, upload_to=''),
),
]
| # Generated by Django 2.2.28 on 2022-04-20 13:05
import sorl.thumbnail.fields
from django.db import (
migrations,
models,
)
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0098_update_startup_update_20220408_0441'),
]
operations = [
migrations.AddField(
model_name='program',
name='hubspot_url',
field=models.URLField(blank=True, null=True),
),
migrations.AddField(
model_name='program',
name='program_image',
field=sorl.thumbnail.fields.ImageField(
null=True,
upload_to='program_images'),
),
]
| Fix image field import and migration | [AC-9452] Fix image field import and migration
| Python | mit | masschallenge/django-accelerator,masschallenge/django-accelerator | # Generated by Django 2.2.28 on 2022-04-20 13:05
from django.db import (
migrations,
models,
)
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0098_update_startup_update_20220408_0441'),
]
operations = [
migrations.AddField(
model_name='program',
name='hubspot_url',
field=models.URLField(blank=True, null=True),
),
migrations.AddField(
model_name='program',
name='program_image',
field=models.ImageField(null=True, upload_to=''),
),
]
[AC-9452] Fix image field import and migration | # Generated by Django 2.2.28 on 2022-04-20 13:05
import sorl.thumbnail.fields
from django.db import (
migrations,
models,
)
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0098_update_startup_update_20220408_0441'),
]
operations = [
migrations.AddField(
model_name='program',
name='hubspot_url',
field=models.URLField(blank=True, null=True),
),
migrations.AddField(
model_name='program',
name='program_image',
field=sorl.thumbnail.fields.ImageField(
null=True,
upload_to='program_images'),
),
]
| <commit_before># Generated by Django 2.2.28 on 2022-04-20 13:05
from django.db import (
migrations,
models,
)
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0098_update_startup_update_20220408_0441'),
]
operations = [
migrations.AddField(
model_name='program',
name='hubspot_url',
field=models.URLField(blank=True, null=True),
),
migrations.AddField(
model_name='program',
name='program_image',
field=models.ImageField(null=True, upload_to=''),
),
]
<commit_msg>[AC-9452] Fix image field import and migration<commit_after> | # Generated by Django 2.2.28 on 2022-04-20 13:05
import sorl.thumbnail.fields
from django.db import (
migrations,
models,
)
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0098_update_startup_update_20220408_0441'),
]
operations = [
migrations.AddField(
model_name='program',
name='hubspot_url',
field=models.URLField(blank=True, null=True),
),
migrations.AddField(
model_name='program',
name='program_image',
field=sorl.thumbnail.fields.ImageField(
null=True,
upload_to='program_images'),
),
]
| # Generated by Django 2.2.28 on 2022-04-20 13:05
from django.db import (
migrations,
models,
)
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0098_update_startup_update_20220408_0441'),
]
operations = [
migrations.AddField(
model_name='program',
name='hubspot_url',
field=models.URLField(blank=True, null=True),
),
migrations.AddField(
model_name='program',
name='program_image',
field=models.ImageField(null=True, upload_to=''),
),
]
[AC-9452] Fix image field import and migration# Generated by Django 2.2.28 on 2022-04-20 13:05
import sorl.thumbnail.fields
from django.db import (
migrations,
models,
)
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0098_update_startup_update_20220408_0441'),
]
operations = [
migrations.AddField(
model_name='program',
name='hubspot_url',
field=models.URLField(blank=True, null=True),
),
migrations.AddField(
model_name='program',
name='program_image',
field=sorl.thumbnail.fields.ImageField(
null=True,
upload_to='program_images'),
),
]
| <commit_before># Generated by Django 2.2.28 on 2022-04-20 13:05
from django.db import (
migrations,
models,
)
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0098_update_startup_update_20220408_0441'),
]
operations = [
migrations.AddField(
model_name='program',
name='hubspot_url',
field=models.URLField(blank=True, null=True),
),
migrations.AddField(
model_name='program',
name='program_image',
field=models.ImageField(null=True, upload_to=''),
),
]
<commit_msg>[AC-9452] Fix image field import and migration<commit_after># Generated by Django 2.2.28 on 2022-04-20 13:05
import sorl.thumbnail.fields
from django.db import (
migrations,
models,
)
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0098_update_startup_update_20220408_0441'),
]
operations = [
migrations.AddField(
model_name='program',
name='hubspot_url',
field=models.URLField(blank=True, null=True),
),
migrations.AddField(
model_name='program',
name='program_image',
field=sorl.thumbnail.fields.ImageField(
null=True,
upload_to='program_images'),
),
]
|
54454f7f963730aa3fc91c5b1baf44762b07fb47 | contrib/linux/tests/test_action_dig.py | contrib/linux/tests/test_action_dig.py | #!/usr/bin/env python
# Copyright 2020 The StackStorm Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from st2tests.base import BaseActionTestCase
from dig import DigAction
class DigActionTestCase(BaseActionTestCase):
action_cls = DigAction
def test_run(self):
action = self.get_action_instance()
# Use the defaults from dig.yaml
result = action.run(rand=False, count=0, nameserver=None, hostname='', queryopts='short')
self.assertIsInstance(result, list)
self.assertEqual(len(result), 0)
result = action.run(rand=False, count=0, nameserver=None, hostname='google.com',
queryopts='')
self.assertIsInstance(result, list)
self.assertGreater(len(result), 0)
| #!/usr/bin/env python
# Copyright 2020 The StackStorm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from st2tests.base import BaseActionTestCase
from dig import DigAction
class DigActionTestCase(BaseActionTestCase):
action_cls = DigAction
def test_run(self):
action = self.get_action_instance()
# Use the defaults from dig.yaml
result = action.run(rand=False, count=0, nameserver=None, hostname='', queryopts='short')
self.assertIsInstance(result, list)
self.assertEqual(len(result), 0)
result = action.run(rand=False, count=0, nameserver=None, hostname='google.com',
queryopts='')
self.assertIsInstance(result, list)
self.assertGreater(len(result), 0)
| Use "The StackStorm Authors" in copyright | Use "The StackStorm Authors" in copyright
Co-authored-by: Eugen C. <67cac084fbaf94f499d5252e3cb50165dbf6e026@armab.io> | Python | apache-2.0 | StackStorm/st2,Plexxi/st2,StackStorm/st2,StackStorm/st2,StackStorm/st2,Plexxi/st2,Plexxi/st2,nzlosh/st2,nzlosh/st2,Plexxi/st2,nzlosh/st2,nzlosh/st2 | #!/usr/bin/env python
# Copyright 2020 The StackStorm Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from st2tests.base import BaseActionTestCase
from dig import DigAction
class DigActionTestCase(BaseActionTestCase):
action_cls = DigAction
def test_run(self):
action = self.get_action_instance()
# Use the defaults from dig.yaml
result = action.run(rand=False, count=0, nameserver=None, hostname='', queryopts='short')
self.assertIsInstance(result, list)
self.assertEqual(len(result), 0)
result = action.run(rand=False, count=0, nameserver=None, hostname='google.com',
queryopts='')
self.assertIsInstance(result, list)
self.assertGreater(len(result), 0)
Use "The StackStorm Authors" in copyright
Co-authored-by: Eugen C. <67cac084fbaf94f499d5252e3cb50165dbf6e026@armab.io> | #!/usr/bin/env python
# Copyright 2020 The StackStorm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from st2tests.base import BaseActionTestCase
from dig import DigAction
class DigActionTestCase(BaseActionTestCase):
action_cls = DigAction
def test_run(self):
action = self.get_action_instance()
# Use the defaults from dig.yaml
result = action.run(rand=False, count=0, nameserver=None, hostname='', queryopts='short')
self.assertIsInstance(result, list)
self.assertEqual(len(result), 0)
result = action.run(rand=False, count=0, nameserver=None, hostname='google.com',
queryopts='')
self.assertIsInstance(result, list)
self.assertGreater(len(result), 0)
| <commit_before>#!/usr/bin/env python
# Copyright 2020 The StackStorm Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from st2tests.base import BaseActionTestCase
from dig import DigAction
class DigActionTestCase(BaseActionTestCase):
action_cls = DigAction
def test_run(self):
action = self.get_action_instance()
# Use the defaults from dig.yaml
result = action.run(rand=False, count=0, nameserver=None, hostname='', queryopts='short')
self.assertIsInstance(result, list)
self.assertEqual(len(result), 0)
result = action.run(rand=False, count=0, nameserver=None, hostname='google.com',
queryopts='')
self.assertIsInstance(result, list)
self.assertGreater(len(result), 0)
<commit_msg>Use "The StackStorm Authors" in copyright
Co-authored-by: Eugen C. <67cac084fbaf94f499d5252e3cb50165dbf6e026@armab.io><commit_after> | #!/usr/bin/env python
# Copyright 2020 The StackStorm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from st2tests.base import BaseActionTestCase
from dig import DigAction
class DigActionTestCase(BaseActionTestCase):
action_cls = DigAction
def test_run(self):
action = self.get_action_instance()
# Use the defaults from dig.yaml
result = action.run(rand=False, count=0, nameserver=None, hostname='', queryopts='short')
self.assertIsInstance(result, list)
self.assertEqual(len(result), 0)
result = action.run(rand=False, count=0, nameserver=None, hostname='google.com',
queryopts='')
self.assertIsInstance(result, list)
self.assertGreater(len(result), 0)
| #!/usr/bin/env python
# Copyright 2020 The StackStorm Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from st2tests.base import BaseActionTestCase
from dig import DigAction
class DigActionTestCase(BaseActionTestCase):
action_cls = DigAction
def test_run(self):
action = self.get_action_instance()
# Use the defaults from dig.yaml
result = action.run(rand=False, count=0, nameserver=None, hostname='', queryopts='short')
self.assertIsInstance(result, list)
self.assertEqual(len(result), 0)
result = action.run(rand=False, count=0, nameserver=None, hostname='google.com',
queryopts='')
self.assertIsInstance(result, list)
self.assertGreater(len(result), 0)
Use "The StackStorm Authors" in copyright
Co-authored-by: Eugen C. <67cac084fbaf94f499d5252e3cb50165dbf6e026@armab.io>#!/usr/bin/env python
# Copyright 2020 The StackStorm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from st2tests.base import BaseActionTestCase
from dig import DigAction
class DigActionTestCase(BaseActionTestCase):
action_cls = DigAction
def test_run(self):
action = self.get_action_instance()
# Use the defaults from dig.yaml
result = action.run(rand=False, count=0, nameserver=None, hostname='', queryopts='short')
self.assertIsInstance(result, list)
self.assertEqual(len(result), 0)
result = action.run(rand=False, count=0, nameserver=None, hostname='google.com',
queryopts='')
self.assertIsInstance(result, list)
self.assertGreater(len(result), 0)
| <commit_before>#!/usr/bin/env python
# Copyright 2020 The StackStorm Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from st2tests.base import BaseActionTestCase
from dig import DigAction
class DigActionTestCase(BaseActionTestCase):
action_cls = DigAction
def test_run(self):
action = self.get_action_instance()
# Use the defaults from dig.yaml
result = action.run(rand=False, count=0, nameserver=None, hostname='', queryopts='short')
self.assertIsInstance(result, list)
self.assertEqual(len(result), 0)
result = action.run(rand=False, count=0, nameserver=None, hostname='google.com',
queryopts='')
self.assertIsInstance(result, list)
self.assertGreater(len(result), 0)
<commit_msg>Use "The StackStorm Authors" in copyright
Co-authored-by: Eugen C. <67cac084fbaf94f499d5252e3cb50165dbf6e026@armab.io><commit_after>#!/usr/bin/env python
# Copyright 2020 The StackStorm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from st2tests.base import BaseActionTestCase
from dig import DigAction
class DigActionTestCase(BaseActionTestCase):
action_cls = DigAction
def test_run(self):
action = self.get_action_instance()
# Use the defaults from dig.yaml
result = action.run(rand=False, count=0, nameserver=None, hostname='', queryopts='short')
self.assertIsInstance(result, list)
self.assertEqual(len(result), 0)
result = action.run(rand=False, count=0, nameserver=None, hostname='google.com',
queryopts='')
self.assertIsInstance(result, list)
self.assertGreater(len(result), 0)
|
3f2c13ecc64c84b51ecc5867004b9cbc32e375ac | Discord/utilities/errors.py | Discord/utilities/errors.py |
from discord.ext.commands.errors import CommandError
class NotServerOwner(CommandError):
'''Not Server Owner'''
pass
class VoiceNotConnected(CommandError):
'''Voice Not Connected'''
pass
class PermittedVoiceNotConnected(VoiceNotConnected):
'''Permitted, but Voice Not Connected'''
pass
class NotPermittedVoiceNotConnected(VoiceNotConnected):
'''Voice Not Connected, and Not Permitted'''
pass
class MissingPermissions(CommandError):
'''Missing Permissions'''
pass
class MissingCapability(CommandError):
'''Missing Capability'''
def __init__(self, permissions):
self.permissions = permissions
class NotPermitted(CommandError):
'''Not Permitted'''
pass
class AudioError(CommandError):
'''Audio Error'''
pass
class AudioNotPlaying(AudioError):
'''Audio Not Playing'''
pass
|
from discord.ext.commands.errors import CommandError
class NotServerOwner(CommandError):
'''Not Server Owner'''
pass
class VoiceNotConnected(CommandError):
'''Voice Not Connected'''
pass
class PermittedVoiceNotConnected(VoiceNotConnected):
'''Permitted, but Voice Not Connected'''
pass
class NotPermittedVoiceNotConnected(VoiceNotConnected):
'''Voice Not Connected, and Not Permitted'''
pass
class MissingPermissions(CommandError):
'''Missing Permissions'''
pass
class MissingCapability(CommandError):
'''Missing Capability'''
def __init__(self, permissions):
self.permissions = permissions
class NotPermitted(CommandError):
'''Not Permitted'''
pass
class AudioError(CommandError):
'''Audio Error'''
pass
| Remove Audio Not Playing error | [Discord] Remove Audio Not Playing error
| Python | mit | Harmon758/Harmonbot,Harmon758/Harmonbot |
from discord.ext.commands.errors import CommandError
class NotServerOwner(CommandError):
'''Not Server Owner'''
pass
class VoiceNotConnected(CommandError):
'''Voice Not Connected'''
pass
class PermittedVoiceNotConnected(VoiceNotConnected):
'''Permitted, but Voice Not Connected'''
pass
class NotPermittedVoiceNotConnected(VoiceNotConnected):
'''Voice Not Connected, and Not Permitted'''
pass
class MissingPermissions(CommandError):
'''Missing Permissions'''
pass
class MissingCapability(CommandError):
'''Missing Capability'''
def __init__(self, permissions):
self.permissions = permissions
class NotPermitted(CommandError):
'''Not Permitted'''
pass
class AudioError(CommandError):
'''Audio Error'''
pass
class AudioNotPlaying(AudioError):
'''Audio Not Playing'''
pass
[Discord] Remove Audio Not Playing error |
from discord.ext.commands.errors import CommandError
class NotServerOwner(CommandError):
'''Not Server Owner'''
pass
class VoiceNotConnected(CommandError):
'''Voice Not Connected'''
pass
class PermittedVoiceNotConnected(VoiceNotConnected):
'''Permitted, but Voice Not Connected'''
pass
class NotPermittedVoiceNotConnected(VoiceNotConnected):
'''Voice Not Connected, and Not Permitted'''
pass
class MissingPermissions(CommandError):
'''Missing Permissions'''
pass
class MissingCapability(CommandError):
'''Missing Capability'''
def __init__(self, permissions):
self.permissions = permissions
class NotPermitted(CommandError):
'''Not Permitted'''
pass
class AudioError(CommandError):
'''Audio Error'''
pass
| <commit_before>
from discord.ext.commands.errors import CommandError
class NotServerOwner(CommandError):
'''Not Server Owner'''
pass
class VoiceNotConnected(CommandError):
'''Voice Not Connected'''
pass
class PermittedVoiceNotConnected(VoiceNotConnected):
'''Permitted, but Voice Not Connected'''
pass
class NotPermittedVoiceNotConnected(VoiceNotConnected):
'''Voice Not Connected, and Not Permitted'''
pass
class MissingPermissions(CommandError):
'''Missing Permissions'''
pass
class MissingCapability(CommandError):
'''Missing Capability'''
def __init__(self, permissions):
self.permissions = permissions
class NotPermitted(CommandError):
'''Not Permitted'''
pass
class AudioError(CommandError):
'''Audio Error'''
pass
class AudioNotPlaying(AudioError):
'''Audio Not Playing'''
pass
<commit_msg>[Discord] Remove Audio Not Playing error<commit_after> |
from discord.ext.commands.errors import CommandError
class NotServerOwner(CommandError):
'''Not Server Owner'''
pass
class VoiceNotConnected(CommandError):
'''Voice Not Connected'''
pass
class PermittedVoiceNotConnected(VoiceNotConnected):
'''Permitted, but Voice Not Connected'''
pass
class NotPermittedVoiceNotConnected(VoiceNotConnected):
'''Voice Not Connected, and Not Permitted'''
pass
class MissingPermissions(CommandError):
'''Missing Permissions'''
pass
class MissingCapability(CommandError):
'''Missing Capability'''
def __init__(self, permissions):
self.permissions = permissions
class NotPermitted(CommandError):
'''Not Permitted'''
pass
class AudioError(CommandError):
'''Audio Error'''
pass
|
from discord.ext.commands.errors import CommandError
class NotServerOwner(CommandError):
'''Not Server Owner'''
pass
class VoiceNotConnected(CommandError):
'''Voice Not Connected'''
pass
class PermittedVoiceNotConnected(VoiceNotConnected):
'''Permitted, but Voice Not Connected'''
pass
class NotPermittedVoiceNotConnected(VoiceNotConnected):
'''Voice Not Connected, and Not Permitted'''
pass
class MissingPermissions(CommandError):
'''Missing Permissions'''
pass
class MissingCapability(CommandError):
'''Missing Capability'''
def __init__(self, permissions):
self.permissions = permissions
class NotPermitted(CommandError):
'''Not Permitted'''
pass
class AudioError(CommandError):
'''Audio Error'''
pass
class AudioNotPlaying(AudioError):
'''Audio Not Playing'''
pass
[Discord] Remove Audio Not Playing error
from discord.ext.commands.errors import CommandError
class NotServerOwner(CommandError):
'''Not Server Owner'''
pass
class VoiceNotConnected(CommandError):
'''Voice Not Connected'''
pass
class PermittedVoiceNotConnected(VoiceNotConnected):
'''Permitted, but Voice Not Connected'''
pass
class NotPermittedVoiceNotConnected(VoiceNotConnected):
'''Voice Not Connected, and Not Permitted'''
pass
class MissingPermissions(CommandError):
'''Missing Permissions'''
pass
class MissingCapability(CommandError):
'''Missing Capability'''
def __init__(self, permissions):
self.permissions = permissions
class NotPermitted(CommandError):
'''Not Permitted'''
pass
class AudioError(CommandError):
'''Audio Error'''
pass
| <commit_before>
from discord.ext.commands.errors import CommandError
class NotServerOwner(CommandError):
'''Not Server Owner'''
pass
class VoiceNotConnected(CommandError):
'''Voice Not Connected'''
pass
class PermittedVoiceNotConnected(VoiceNotConnected):
'''Permitted, but Voice Not Connected'''
pass
class NotPermittedVoiceNotConnected(VoiceNotConnected):
'''Voice Not Connected, and Not Permitted'''
pass
class MissingPermissions(CommandError):
'''Missing Permissions'''
pass
class MissingCapability(CommandError):
'''Missing Capability'''
def __init__(self, permissions):
self.permissions = permissions
class NotPermitted(CommandError):
'''Not Permitted'''
pass
class AudioError(CommandError):
'''Audio Error'''
pass
class AudioNotPlaying(AudioError):
'''Audio Not Playing'''
pass
<commit_msg>[Discord] Remove Audio Not Playing error<commit_after>
from discord.ext.commands.errors import CommandError
class NotServerOwner(CommandError):
'''Not Server Owner'''
pass
class VoiceNotConnected(CommandError):
'''Voice Not Connected'''
pass
class PermittedVoiceNotConnected(VoiceNotConnected):
'''Permitted, but Voice Not Connected'''
pass
class NotPermittedVoiceNotConnected(VoiceNotConnected):
'''Voice Not Connected, and Not Permitted'''
pass
class MissingPermissions(CommandError):
'''Missing Permissions'''
pass
class MissingCapability(CommandError):
'''Missing Capability'''
def __init__(self, permissions):
self.permissions = permissions
class NotPermitted(CommandError):
'''Not Permitted'''
pass
class AudioError(CommandError):
'''Audio Error'''
pass
|
6dfa381b26948b97b7abc3de9f1a02618fd5ad0f | src/geoserver/style.py | src/geoserver/style.py | from geoserver.support import ResourceInfo, atom_link
class Style(ResourceInfo):
def __init__(self,catalog, node):
self.catalog = catalog
self.name = node.find("name").text
self.href = atom_link(node)
self.update()
def update(self):
ResourceInfo.update(self)
self.name = self.metadata.find("name").text
self.filename = self.metadata.find("filename").text
# Get the raw sld
sld_url = self.href.replace(".xml", ".sld")
sld_xml = self.catalog.get_xml(sld_url)
# Obtain the user style node where title and name are located
user_style = sld_xml.find("{http://www.opengis.net/sld}NamedLayer/{http://www.opengis.net/sld}UserStyle")
# Extract name and title nodes from user_style
name_node = user_style.find("{http://www.opengis.net/sld}Name")
title_node = user_style.find("{http://www.opengis.net/sld}Title")
# Store the text value of sld name and title if present
self.sld_name = name_node.text if hasattr(name_node, 'text') else None
self.sld_title = title_node.text if hasattr(title_node, 'text') else None
def __repr__(self):
return "Style[%s]" % self.name
| from geoserver.support import ResourceInfo, atom_link
import re
class Style(ResourceInfo):
def __init__(self,catalog, node):
self.catalog = catalog
self.name = node.find("name").text
self.href = atom_link(node)
self.update()
def update(self):
ResourceInfo.update(self)
self.name = self.metadata.find("name").text
self.filename = self.metadata.find("filename").text
# Get the raw sld
sld_url = self.href.replace(".xml", ".sld")
sld_xml = self.catalog.get_xml(sld_url)
# Obtain the user style node where title and name are located
user_style = sld_xml.find("{http://www.opengis.net/sld}NamedLayer/{http://www.opengis.net/sld}UserStyle")
# Extract name and title nodes from user_style
name_node = user_style.find("{http://www.opengis.net/sld}Name")
title_node = user_style.find("{http://www.opengis.net/sld}Title")
# Store the text value of sld name and title if present
self.sld_name = name_node.text if hasattr(name_node, 'text') else None
self.sld_title = title_node.text if hasattr(title_node, 'text') else None
def body_href(self):
style_container = re.sub(r"/rest$", "/styles", self.catalog.service_url)
return "%s/%s" % (style_container, self.filename)
def __repr__(self):
return "Style[%s]" % self.name
| Add body_href method for getting a public url for a Style's body. | Add body_href method for getting a public url for a Style's body.
| Python | mit | cristianzamar/gsconfig,scottp-dpaw/gsconfig,boundlessgeo/gsconfig,Geode/gsconfig,afabiani/gsconfig,garnertb/gsconfig.py | from geoserver.support import ResourceInfo, atom_link
class Style(ResourceInfo):
def __init__(self,catalog, node):
self.catalog = catalog
self.name = node.find("name").text
self.href = atom_link(node)
self.update()
def update(self):
ResourceInfo.update(self)
self.name = self.metadata.find("name").text
self.filename = self.metadata.find("filename").text
# Get the raw sld
sld_url = self.href.replace(".xml", ".sld")
sld_xml = self.catalog.get_xml(sld_url)
# Obtain the user style node where title and name are located
user_style = sld_xml.find("{http://www.opengis.net/sld}NamedLayer/{http://www.opengis.net/sld}UserStyle")
# Extract name and title nodes from user_style
name_node = user_style.find("{http://www.opengis.net/sld}Name")
title_node = user_style.find("{http://www.opengis.net/sld}Title")
# Store the text value of sld name and title if present
self.sld_name = name_node.text if hasattr(name_node, 'text') else None
self.sld_title = title_node.text if hasattr(title_node, 'text') else None
def __repr__(self):
return "Style[%s]" % self.name
Add body_href method for getting a public url for a Style's body. | from geoserver.support import ResourceInfo, atom_link
import re
class Style(ResourceInfo):
def __init__(self,catalog, node):
self.catalog = catalog
self.name = node.find("name").text
self.href = atom_link(node)
self.update()
def update(self):
ResourceInfo.update(self)
self.name = self.metadata.find("name").text
self.filename = self.metadata.find("filename").text
# Get the raw sld
sld_url = self.href.replace(".xml", ".sld")
sld_xml = self.catalog.get_xml(sld_url)
# Obtain the user style node where title and name are located
user_style = sld_xml.find("{http://www.opengis.net/sld}NamedLayer/{http://www.opengis.net/sld}UserStyle")
# Extract name and title nodes from user_style
name_node = user_style.find("{http://www.opengis.net/sld}Name")
title_node = user_style.find("{http://www.opengis.net/sld}Title")
# Store the text value of sld name and title if present
self.sld_name = name_node.text if hasattr(name_node, 'text') else None
self.sld_title = title_node.text if hasattr(title_node, 'text') else None
def body_href(self):
style_container = re.sub(r"/rest$", "/styles", self.catalog.service_url)
return "%s/%s" % (style_container, self.filename)
def __repr__(self):
return "Style[%s]" % self.name
| <commit_before>from geoserver.support import ResourceInfo, atom_link
class Style(ResourceInfo):
def __init__(self,catalog, node):
self.catalog = catalog
self.name = node.find("name").text
self.href = atom_link(node)
self.update()
def update(self):
ResourceInfo.update(self)
self.name = self.metadata.find("name").text
self.filename = self.metadata.find("filename").text
# Get the raw sld
sld_url = self.href.replace(".xml", ".sld")
sld_xml = self.catalog.get_xml(sld_url)
# Obtain the user style node where title and name are located
user_style = sld_xml.find("{http://www.opengis.net/sld}NamedLayer/{http://www.opengis.net/sld}UserStyle")
# Extract name and title nodes from user_style
name_node = user_style.find("{http://www.opengis.net/sld}Name")
title_node = user_style.find("{http://www.opengis.net/sld}Title")
# Store the text value of sld name and title if present
self.sld_name = name_node.text if hasattr(name_node, 'text') else None
self.sld_title = title_node.text if hasattr(title_node, 'text') else None
def __repr__(self):
return "Style[%s]" % self.name
<commit_msg>Add body_href method for getting a public url for a Style's body.<commit_after> | from geoserver.support import ResourceInfo, atom_link
import re
class Style(ResourceInfo):
def __init__(self,catalog, node):
self.catalog = catalog
self.name = node.find("name").text
self.href = atom_link(node)
self.update()
def update(self):
ResourceInfo.update(self)
self.name = self.metadata.find("name").text
self.filename = self.metadata.find("filename").text
# Get the raw sld
sld_url = self.href.replace(".xml", ".sld")
sld_xml = self.catalog.get_xml(sld_url)
# Obtain the user style node where title and name are located
user_style = sld_xml.find("{http://www.opengis.net/sld}NamedLayer/{http://www.opengis.net/sld}UserStyle")
# Extract name and title nodes from user_style
name_node = user_style.find("{http://www.opengis.net/sld}Name")
title_node = user_style.find("{http://www.opengis.net/sld}Title")
# Store the text value of sld name and title if present
self.sld_name = name_node.text if hasattr(name_node, 'text') else None
self.sld_title = title_node.text if hasattr(title_node, 'text') else None
def body_href(self):
style_container = re.sub(r"/rest$", "/styles", self.catalog.service_url)
return "%s/%s" % (style_container, self.filename)
def __repr__(self):
return "Style[%s]" % self.name
| from geoserver.support import ResourceInfo, atom_link
class Style(ResourceInfo):
def __init__(self,catalog, node):
self.catalog = catalog
self.name = node.find("name").text
self.href = atom_link(node)
self.update()
def update(self):
ResourceInfo.update(self)
self.name = self.metadata.find("name").text
self.filename = self.metadata.find("filename").text
# Get the raw sld
sld_url = self.href.replace(".xml", ".sld")
sld_xml = self.catalog.get_xml(sld_url)
# Obtain the user style node where title and name are located
user_style = sld_xml.find("{http://www.opengis.net/sld}NamedLayer/{http://www.opengis.net/sld}UserStyle")
# Extract name and title nodes from user_style
name_node = user_style.find("{http://www.opengis.net/sld}Name")
title_node = user_style.find("{http://www.opengis.net/sld}Title")
# Store the text value of sld name and title if present
self.sld_name = name_node.text if hasattr(name_node, 'text') else None
self.sld_title = title_node.text if hasattr(title_node, 'text') else None
def __repr__(self):
return "Style[%s]" % self.name
Add body_href method for getting a public url for a Style's body.from geoserver.support import ResourceInfo, atom_link
import re
class Style(ResourceInfo):
def __init__(self,catalog, node):
self.catalog = catalog
self.name = node.find("name").text
self.href = atom_link(node)
self.update()
def update(self):
ResourceInfo.update(self)
self.name = self.metadata.find("name").text
self.filename = self.metadata.find("filename").text
# Get the raw sld
sld_url = self.href.replace(".xml", ".sld")
sld_xml = self.catalog.get_xml(sld_url)
# Obtain the user style node where title and name are located
user_style = sld_xml.find("{http://www.opengis.net/sld}NamedLayer/{http://www.opengis.net/sld}UserStyle")
# Extract name and title nodes from user_style
name_node = user_style.find("{http://www.opengis.net/sld}Name")
title_node = user_style.find("{http://www.opengis.net/sld}Title")
# Store the text value of sld name and title if present
self.sld_name = name_node.text if hasattr(name_node, 'text') else None
self.sld_title = title_node.text if hasattr(title_node, 'text') else None
def body_href(self):
style_container = re.sub(r"/rest$", "/styles", self.catalog.service_url)
return "%s/%s" % (style_container, self.filename)
def __repr__(self):
return "Style[%s]" % self.name
| <commit_before>from geoserver.support import ResourceInfo, atom_link
class Style(ResourceInfo):
def __init__(self,catalog, node):
self.catalog = catalog
self.name = node.find("name").text
self.href = atom_link(node)
self.update()
def update(self):
ResourceInfo.update(self)
self.name = self.metadata.find("name").text
self.filename = self.metadata.find("filename").text
# Get the raw sld
sld_url = self.href.replace(".xml", ".sld")
sld_xml = self.catalog.get_xml(sld_url)
# Obtain the user style node where title and name are located
user_style = sld_xml.find("{http://www.opengis.net/sld}NamedLayer/{http://www.opengis.net/sld}UserStyle")
# Extract name and title nodes from user_style
name_node = user_style.find("{http://www.opengis.net/sld}Name")
title_node = user_style.find("{http://www.opengis.net/sld}Title")
# Store the text value of sld name and title if present
self.sld_name = name_node.text if hasattr(name_node, 'text') else None
self.sld_title = title_node.text if hasattr(title_node, 'text') else None
def __repr__(self):
return "Style[%s]" % self.name
<commit_msg>Add body_href method for getting a public url for a Style's body.<commit_after>from geoserver.support import ResourceInfo, atom_link
import re
class Style(ResourceInfo):
def __init__(self,catalog, node):
self.catalog = catalog
self.name = node.find("name").text
self.href = atom_link(node)
self.update()
def update(self):
ResourceInfo.update(self)
self.name = self.metadata.find("name").text
self.filename = self.metadata.find("filename").text
# Get the raw sld
sld_url = self.href.replace(".xml", ".sld")
sld_xml = self.catalog.get_xml(sld_url)
# Obtain the user style node where title and name are located
user_style = sld_xml.find("{http://www.opengis.net/sld}NamedLayer/{http://www.opengis.net/sld}UserStyle")
# Extract name and title nodes from user_style
name_node = user_style.find("{http://www.opengis.net/sld}Name")
title_node = user_style.find("{http://www.opengis.net/sld}Title")
# Store the text value of sld name and title if present
self.sld_name = name_node.text if hasattr(name_node, 'text') else None
self.sld_title = title_node.text if hasattr(title_node, 'text') else None
def body_href(self):
style_container = re.sub(r"/rest$", "/styles", self.catalog.service_url)
return "%s/%s" % (style_container, self.filename)
def __repr__(self):
return "Style[%s]" % self.name
|
115dbdecabc74f4f08d07099a4997860ebe5278b | telemetry/telemetry/core/platform/profiler/android_screen_recorder_profiler.py | telemetry/telemetry/core/platform/profiler/android_screen_recorder_profiler.py | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
from telemetry.core import util
from telemetry.core.backends.chrome import android_browser_finder
from telemetry.core.platform import profiler
class AndroidScreenRecordingProfiler(profiler.Profiler):
"""Captures a screen recording on Android."""
def __init__(self, browser_backend, platform_backend, output_path, state):
super(AndroidScreenRecordingProfiler, self).__init__(
browser_backend, platform_backend, output_path, state)
self._output_path = output_path + '.mp4'
self._recorder = subprocess.Popen(
[os.path.join(util.GetChromiumSrcDir(), 'build', 'android',
'screenshot.py'), '--video', '--file',
self._output_path], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
@classmethod
def name(cls):
return 'android-screen-recorder'
@classmethod
def is_supported(cls, browser_type):
if browser_type == 'any':
return android_browser_finder.CanFindAvailableBrowsers()
return browser_type.startswith('android')
def CollectProfile(self):
self._recorder.communicate(input='\n')
self._recorder.wait()
print 'Screen recording saved as %s' % self._output_path
print 'To view, open in Chrome or a video player'
return [self._output_path]
| # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
from telemetry.core import util
from telemetry.core.backends.chrome import android_browser_finder
from telemetry.core.platform import profiler
class AndroidScreenRecordingProfiler(profiler.Profiler):
"""Captures a screen recording on Android."""
def __init__(self, browser_backend, platform_backend, output_path, state):
super(AndroidScreenRecordingProfiler, self).__init__(
browser_backend, platform_backend, output_path, state)
self._output_path = output_path + '.mp4'
self._recorder = subprocess.Popen(
[os.path.join(util.GetChromiumSrcDir(), 'build', 'android',
'screenshot.py'),
'--video',
'--file', self._output_path,
'--device', browser_backend.adb.device()],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
@classmethod
def name(cls):
return 'android-screen-recorder'
@classmethod
def is_supported(cls, browser_type):
if browser_type == 'any':
return android_browser_finder.CanFindAvailableBrowsers()
return browser_type.startswith('android')
def CollectProfile(self):
self._recorder.communicate(input='\n')
self._recorder.wait()
print 'Screen recording saved as %s' % self._output_path
print 'To view, open in Chrome or a video player'
return [self._output_path]
| Fix screen recording with multiple connected devices | telemetry: Fix screen recording with multiple connected devices
Make it possible to use the Android screen recording profiler with
multiple connected devices. Only the screen on the device that is
actually running the telemetry test will get recorded.
BUG=331435
TEST=tools/perf/run_benchmark smoothness.key_mobile_sites \
--page-filter=linus --browser=android-content-shell \
--profiler=android-screen-recorder
NOTRY=true
Review URL: https://codereview.chromium.org/119323008
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@243080 0039d316-1c4b-4281-b951-d872f2087c98
| Python | bsd-3-clause | SummerLW/Perf-Insight-Report,catapult-project/catapult-csm,catapult-project/catapult,catapult-project/catapult-csm,catapult-project/catapult-csm,sahiljain/catapult,benschmaus/catapult,catapult-project/catapult-csm,SummerLW/Perf-Insight-Report,catapult-project/catapult-csm,benschmaus/catapult,sahiljain/catapult,SummerLW/Perf-Insight-Report,benschmaus/catapult,SummerLW/Perf-Insight-Report,catapult-project/catapult,catapult-project/catapult,benschmaus/catapult,catapult-project/catapult,catapult-project/catapult-csm,SummerLW/Perf-Insight-Report,catapult-project/catapult,catapult-project/catapult-csm,sahiljain/catapult,sahiljain/catapult,SummerLW/Perf-Insight-Report,sahiljain/catapult,benschmaus/catapult,catapult-project/catapult,sahiljain/catapult,catapult-project/catapult,benschmaus/catapult,benschmaus/catapult | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
from telemetry.core import util
from telemetry.core.backends.chrome import android_browser_finder
from telemetry.core.platform import profiler
class AndroidScreenRecordingProfiler(profiler.Profiler):
"""Captures a screen recording on Android."""
def __init__(self, browser_backend, platform_backend, output_path, state):
super(AndroidScreenRecordingProfiler, self).__init__(
browser_backend, platform_backend, output_path, state)
self._output_path = output_path + '.mp4'
self._recorder = subprocess.Popen(
[os.path.join(util.GetChromiumSrcDir(), 'build', 'android',
'screenshot.py'), '--video', '--file',
self._output_path], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
@classmethod
def name(cls):
return 'android-screen-recorder'
@classmethod
def is_supported(cls, browser_type):
if browser_type == 'any':
return android_browser_finder.CanFindAvailableBrowsers()
return browser_type.startswith('android')
def CollectProfile(self):
self._recorder.communicate(input='\n')
self._recorder.wait()
print 'Screen recording saved as %s' % self._output_path
print 'To view, open in Chrome or a video player'
return [self._output_path]
telemetry: Fix screen recording with multiple connected devices
Make it possible to use the Android screen recording profiler with
multiple connected devices. Only the screen on the device that is
actually running the telemetry test will get recorded.
BUG=331435
TEST=tools/perf/run_benchmark smoothness.key_mobile_sites \
--page-filter=linus --browser=android-content-shell \
--profiler=android-screen-recorder
NOTRY=true
Review URL: https://codereview.chromium.org/119323008
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@243080 0039d316-1c4b-4281-b951-d872f2087c98 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
from telemetry.core import util
from telemetry.core.backends.chrome import android_browser_finder
from telemetry.core.platform import profiler
class AndroidScreenRecordingProfiler(profiler.Profiler):
"""Captures a screen recording on Android."""
def __init__(self, browser_backend, platform_backend, output_path, state):
super(AndroidScreenRecordingProfiler, self).__init__(
browser_backend, platform_backend, output_path, state)
self._output_path = output_path + '.mp4'
self._recorder = subprocess.Popen(
[os.path.join(util.GetChromiumSrcDir(), 'build', 'android',
'screenshot.py'),
'--video',
'--file', self._output_path,
'--device', browser_backend.adb.device()],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
@classmethod
def name(cls):
return 'android-screen-recorder'
@classmethod
def is_supported(cls, browser_type):
if browser_type == 'any':
return android_browser_finder.CanFindAvailableBrowsers()
return browser_type.startswith('android')
def CollectProfile(self):
self._recorder.communicate(input='\n')
self._recorder.wait()
print 'Screen recording saved as %s' % self._output_path
print 'To view, open in Chrome or a video player'
return [self._output_path]
| <commit_before># Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
from telemetry.core import util
from telemetry.core.backends.chrome import android_browser_finder
from telemetry.core.platform import profiler
class AndroidScreenRecordingProfiler(profiler.Profiler):
  """Captures a screen recording on Android.

  Spawns Chromium's build/android/screenshot.py in video mode as a child
  process when the profiler starts, and stops it in CollectProfile().
  """
  def __init__(self, browser_backend, platform_backend, output_path, state):
    super(AndroidScreenRecordingProfiler, self).__init__(
        browser_backend, platform_backend, output_path, state)
    # The recording is written next to the requested output path as .mp4.
    self._output_path = output_path + '.mp4'
    # Start recording immediately. NOTE(review): no '--device' argument is
    # passed, so with multiple attached devices the recorded device is
    # whichever screenshot.py picks by default.
    self._recorder = subprocess.Popen(
        [os.path.join(util.GetChromiumSrcDir(), 'build', 'android',
                      'screenshot.py'), '--video', '--file',
         self._output_path], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  @classmethod
  def name(cls):
    # Identifier used to select this profiler (e.g. --profiler=...).
    return 'android-screen-recorder'
  @classmethod
  def is_supported(cls, browser_type):
    # Screen recording only works for Android browsers; 'any' is supported
    # when an Android browser can be found on an attached device.
    if browser_type == 'any':
      return android_browser_finder.CanFindAvailableBrowsers()
    return browser_type.startswith('android')
  def CollectProfile(self):
    # Writing a newline to screenshot.py's stdin tells it to stop recording.
    self._recorder.communicate(input='\n')
    self._recorder.wait()
    print 'Screen recording saved as %s' % self._output_path
    print 'To view, open in Chrome or a video player'
    return [self._output_path]
<commit_msg>telemetry: Fix screen recording with multiple connected devices
Make it possible to use the Android screen recording profiler with
multiple connected devices. Only the screen on the device that is
actually running the telemetry test will get recorded.
BUG=331435
TEST=tools/perf/run_benchmark smoothness.key_mobile_sites \
--page-filter=linus --browser=android-content-shell \
--profiler=android-screen-recorder
NOTRY=true
Review URL: https://codereview.chromium.org/119323008
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@243080 0039d316-1c4b-4281-b951-d872f2087c98<commit_after> | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
from telemetry.core import util
from telemetry.core.backends.chrome import android_browser_finder
from telemetry.core.platform import profiler
class AndroidScreenRecordingProfiler(profiler.Profiler):
"""Captures a screen recording on Android."""
def __init__(self, browser_backend, platform_backend, output_path, state):
super(AndroidScreenRecordingProfiler, self).__init__(
browser_backend, platform_backend, output_path, state)
self._output_path = output_path + '.mp4'
self._recorder = subprocess.Popen(
[os.path.join(util.GetChromiumSrcDir(), 'build', 'android',
'screenshot.py'),
'--video',
'--file', self._output_path,
'--device', browser_backend.adb.device()],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
@classmethod
def name(cls):
return 'android-screen-recorder'
@classmethod
def is_supported(cls, browser_type):
if browser_type == 'any':
return android_browser_finder.CanFindAvailableBrowsers()
return browser_type.startswith('android')
def CollectProfile(self):
self._recorder.communicate(input='\n')
self._recorder.wait()
print 'Screen recording saved as %s' % self._output_path
print 'To view, open in Chrome or a video player'
return [self._output_path]
| # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
from telemetry.core import util
from telemetry.core.backends.chrome import android_browser_finder
from telemetry.core.platform import profiler
class AndroidScreenRecordingProfiler(profiler.Profiler):
"""Captures a screen recording on Android."""
def __init__(self, browser_backend, platform_backend, output_path, state):
super(AndroidScreenRecordingProfiler, self).__init__(
browser_backend, platform_backend, output_path, state)
self._output_path = output_path + '.mp4'
self._recorder = subprocess.Popen(
[os.path.join(util.GetChromiumSrcDir(), 'build', 'android',
'screenshot.py'), '--video', '--file',
self._output_path], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
@classmethod
def name(cls):
return 'android-screen-recorder'
@classmethod
def is_supported(cls, browser_type):
if browser_type == 'any':
return android_browser_finder.CanFindAvailableBrowsers()
return browser_type.startswith('android')
def CollectProfile(self):
self._recorder.communicate(input='\n')
self._recorder.wait()
print 'Screen recording saved as %s' % self._output_path
print 'To view, open in Chrome or a video player'
return [self._output_path]
telemetry: Fix screen recording with multiple connected devices
Make it possible to use the Android screen recording profiler with
multiple connected devices. Only the screen on the device that is
actually running the telemetry test will get recorded.
BUG=331435
TEST=tools/perf/run_benchmark smoothness.key_mobile_sites \
--page-filter=linus --browser=android-content-shell \
--profiler=android-screen-recorder
NOTRY=true
Review URL: https://codereview.chromium.org/119323008
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@243080 0039d316-1c4b-4281-b951-d872f2087c98# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
from telemetry.core import util
from telemetry.core.backends.chrome import android_browser_finder
from telemetry.core.platform import profiler
class AndroidScreenRecordingProfiler(profiler.Profiler):
"""Captures a screen recording on Android."""
def __init__(self, browser_backend, platform_backend, output_path, state):
super(AndroidScreenRecordingProfiler, self).__init__(
browser_backend, platform_backend, output_path, state)
self._output_path = output_path + '.mp4'
self._recorder = subprocess.Popen(
[os.path.join(util.GetChromiumSrcDir(), 'build', 'android',
'screenshot.py'),
'--video',
'--file', self._output_path,
'--device', browser_backend.adb.device()],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
@classmethod
def name(cls):
return 'android-screen-recorder'
@classmethod
def is_supported(cls, browser_type):
if browser_type == 'any':
return android_browser_finder.CanFindAvailableBrowsers()
return browser_type.startswith('android')
def CollectProfile(self):
self._recorder.communicate(input='\n')
self._recorder.wait()
print 'Screen recording saved as %s' % self._output_path
print 'To view, open in Chrome or a video player'
return [self._output_path]
| <commit_before># Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
from telemetry.core import util
from telemetry.core.backends.chrome import android_browser_finder
from telemetry.core.platform import profiler
class AndroidScreenRecordingProfiler(profiler.Profiler):
"""Captures a screen recording on Android."""
def __init__(self, browser_backend, platform_backend, output_path, state):
super(AndroidScreenRecordingProfiler, self).__init__(
browser_backend, platform_backend, output_path, state)
self._output_path = output_path + '.mp4'
self._recorder = subprocess.Popen(
[os.path.join(util.GetChromiumSrcDir(), 'build', 'android',
'screenshot.py'), '--video', '--file',
self._output_path], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
@classmethod
def name(cls):
return 'android-screen-recorder'
@classmethod
def is_supported(cls, browser_type):
if browser_type == 'any':
return android_browser_finder.CanFindAvailableBrowsers()
return browser_type.startswith('android')
def CollectProfile(self):
self._recorder.communicate(input='\n')
self._recorder.wait()
print 'Screen recording saved as %s' % self._output_path
print 'To view, open in Chrome or a video player'
return [self._output_path]
<commit_msg>telemetry: Fix screen recording with multiple connected devices
Make it possible to use the Android screen recording profiler with
multiple connected devices. Only the screen on the device that is
actually running the telemetry test will get recorded.
BUG=331435
TEST=tools/perf/run_benchmark smoothness.key_mobile_sites \
--page-filter=linus --browser=android-content-shell \
--profiler=android-screen-recorder
NOTRY=true
Review URL: https://codereview.chromium.org/119323008
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@243080 0039d316-1c4b-4281-b951-d872f2087c98<commit_after># Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
from telemetry.core import util
from telemetry.core.backends.chrome import android_browser_finder
from telemetry.core.platform import profiler
class AndroidScreenRecordingProfiler(profiler.Profiler):
"""Captures a screen recording on Android."""
def __init__(self, browser_backend, platform_backend, output_path, state):
super(AndroidScreenRecordingProfiler, self).__init__(
browser_backend, platform_backend, output_path, state)
self._output_path = output_path + '.mp4'
self._recorder = subprocess.Popen(
[os.path.join(util.GetChromiumSrcDir(), 'build', 'android',
'screenshot.py'),
'--video',
'--file', self._output_path,
'--device', browser_backend.adb.device()],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
@classmethod
def name(cls):
return 'android-screen-recorder'
@classmethod
def is_supported(cls, browser_type):
if browser_type == 'any':
return android_browser_finder.CanFindAvailableBrowsers()
return browser_type.startswith('android')
def CollectProfile(self):
self._recorder.communicate(input='\n')
self._recorder.wait()
print 'Screen recording saved as %s' % self._output_path
print 'To view, open in Chrome or a video player'
return [self._output_path]
|
c76734ea034f2a48de0eab995c5db5667086e0c8 | common/util/log.py | common/util/log.py | import sublime
def universal_newlines(string):
    """Convert Windows (CRLF) and old-Mac (CR) line endings to LF."""
    without_crlf = string.replace('\r\n', '\n')
    return without_crlf.replace('\r', '\n')
def panel(message, run_async=True):
    # Show `message` in the GitSavvy output panel, replacing its contents.
    message = universal_newlines(str(message))
    view = sublime.active_window().active_view()
    if run_async:
        # Defer the command to Sublime's async thread to avoid blocking UI.
        sublime.set_timeout_async(
            lambda: view.run_command("gs_display_panel", {"msg": message})
        )
    else:
        view.run_command("gs_display_panel", {"msg": message})
def panel_append(message, run_async=True):
    # Append `message` to the GitSavvy output panel without clearing it.
    message = universal_newlines(str(message))
    view = sublime.active_window().active_view()
    if run_async:
        # Defer the command to Sublime's async thread to avoid blocking UI.
        sublime.set_timeout_async(
            lambda: view.run_command("gs_append_panel", {"msg": message})
        )
    else:
        view.run_command("gs_append_panel", {"msg": message})
| import re
import sublime
# Matches a single ANSI CSI escape sequence (e.g. color codes like \x1B[31m).
ANSI_ESCAPE_RE = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')

def normalize(string):
    """Unify line endings to LF and strip ANSI escape sequences."""
    unified = string.replace('\r\n', '\n').replace('\r', '\n')
    return ANSI_ESCAPE_RE.sub('', unified)
def panel(message, run_async=True):
    # Show `message` in the GitSavvy output panel, replacing its contents.
    # `normalize` unifies line endings and strips ANSI escape sequences.
    message = normalize(str(message))
    view = sublime.active_window().active_view()
    if run_async:
        # Defer the command to Sublime's async thread to avoid blocking UI.
        sublime.set_timeout_async(
            lambda: view.run_command("gs_display_panel", {"msg": message})
        )
    else:
        view.run_command("gs_display_panel", {"msg": message})
def panel_append(message, run_async=True):
    # Append `message` to the GitSavvy output panel without clearing it.
    # `normalize` unifies line endings and strips ANSI escape sequences.
    message = normalize(str(message))
    view = sublime.active_window().active_view()
    if run_async:
        # Defer the command to Sublime's async thread to avoid blocking UI.
        sublime.set_timeout_async(
            lambda: view.run_command("gs_append_panel", {"msg": message})
        )
    else:
        view.run_command("gs_append_panel", {"msg": message})
| Remove ANSI escape sequences from panel output | Remove ANSI escape sequences from panel output
| Python | mit | divmain/GitSavvy,divmain/GitSavvy,divmain/GitSavvy | import sublime
def universal_newlines(string):
return string.replace('\r\n', '\n').replace('\r', '\n')
def panel(message, run_async=True):
message = universal_newlines(str(message))
view = sublime.active_window().active_view()
if run_async:
sublime.set_timeout_async(
lambda: view.run_command("gs_display_panel", {"msg": message})
)
else:
view.run_command("gs_display_panel", {"msg": message})
def panel_append(message, run_async=True):
message = universal_newlines(str(message))
view = sublime.active_window().active_view()
if run_async:
sublime.set_timeout_async(
lambda: view.run_command("gs_append_panel", {"msg": message})
)
else:
view.run_command("gs_append_panel", {"msg": message})
Remove ANSI escape sequences from panel output | import re
import sublime
ANSI_ESCAPE_RE = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
def normalize(string):
return ANSI_ESCAPE_RE.sub('', string.replace('\r\n', '\n').replace('\r', '\n'))
def panel(message, run_async=True):
message = normalize(str(message))
view = sublime.active_window().active_view()
if run_async:
sublime.set_timeout_async(
lambda: view.run_command("gs_display_panel", {"msg": message})
)
else:
view.run_command("gs_display_panel", {"msg": message})
def panel_append(message, run_async=True):
message = normalize(str(message))
view = sublime.active_window().active_view()
if run_async:
sublime.set_timeout_async(
lambda: view.run_command("gs_append_panel", {"msg": message})
)
else:
view.run_command("gs_append_panel", {"msg": message})
| <commit_before>import sublime
def universal_newlines(string):
return string.replace('\r\n', '\n').replace('\r', '\n')
def panel(message, run_async=True):
message = universal_newlines(str(message))
view = sublime.active_window().active_view()
if run_async:
sublime.set_timeout_async(
lambda: view.run_command("gs_display_panel", {"msg": message})
)
else:
view.run_command("gs_display_panel", {"msg": message})
def panel_append(message, run_async=True):
message = universal_newlines(str(message))
view = sublime.active_window().active_view()
if run_async:
sublime.set_timeout_async(
lambda: view.run_command("gs_append_panel", {"msg": message})
)
else:
view.run_command("gs_append_panel", {"msg": message})
<commit_msg>Remove ANSI escape sequences from panel output<commit_after> | import re
import sublime
ANSI_ESCAPE_RE = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
def normalize(string):
return ANSI_ESCAPE_RE.sub('', string.replace('\r\n', '\n').replace('\r', '\n'))
def panel(message, run_async=True):
message = normalize(str(message))
view = sublime.active_window().active_view()
if run_async:
sublime.set_timeout_async(
lambda: view.run_command("gs_display_panel", {"msg": message})
)
else:
view.run_command("gs_display_panel", {"msg": message})
def panel_append(message, run_async=True):
message = normalize(str(message))
view = sublime.active_window().active_view()
if run_async:
sublime.set_timeout_async(
lambda: view.run_command("gs_append_panel", {"msg": message})
)
else:
view.run_command("gs_append_panel", {"msg": message})
| import sublime
def universal_newlines(string):
return string.replace('\r\n', '\n').replace('\r', '\n')
def panel(message, run_async=True):
message = universal_newlines(str(message))
view = sublime.active_window().active_view()
if run_async:
sublime.set_timeout_async(
lambda: view.run_command("gs_display_panel", {"msg": message})
)
else:
view.run_command("gs_display_panel", {"msg": message})
def panel_append(message, run_async=True):
message = universal_newlines(str(message))
view = sublime.active_window().active_view()
if run_async:
sublime.set_timeout_async(
lambda: view.run_command("gs_append_panel", {"msg": message})
)
else:
view.run_command("gs_append_panel", {"msg": message})
Remove ANSI escape sequences from panel outputimport re
import sublime
ANSI_ESCAPE_RE = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
def normalize(string):
return ANSI_ESCAPE_RE.sub('', string.replace('\r\n', '\n').replace('\r', '\n'))
def panel(message, run_async=True):
message = normalize(str(message))
view = sublime.active_window().active_view()
if run_async:
sublime.set_timeout_async(
lambda: view.run_command("gs_display_panel", {"msg": message})
)
else:
view.run_command("gs_display_panel", {"msg": message})
def panel_append(message, run_async=True):
message = normalize(str(message))
view = sublime.active_window().active_view()
if run_async:
sublime.set_timeout_async(
lambda: view.run_command("gs_append_panel", {"msg": message})
)
else:
view.run_command("gs_append_panel", {"msg": message})
| <commit_before>import sublime
def universal_newlines(string):
return string.replace('\r\n', '\n').replace('\r', '\n')
def panel(message, run_async=True):
message = universal_newlines(str(message))
view = sublime.active_window().active_view()
if run_async:
sublime.set_timeout_async(
lambda: view.run_command("gs_display_panel", {"msg": message})
)
else:
view.run_command("gs_display_panel", {"msg": message})
def panel_append(message, run_async=True):
message = universal_newlines(str(message))
view = sublime.active_window().active_view()
if run_async:
sublime.set_timeout_async(
lambda: view.run_command("gs_append_panel", {"msg": message})
)
else:
view.run_command("gs_append_panel", {"msg": message})
<commit_msg>Remove ANSI escape sequences from panel output<commit_after>import re
import sublime
ANSI_ESCAPE_RE = re.compile(r'\x1B\[[0-?]*[ -/]*[@-~]')
def normalize(string):
return ANSI_ESCAPE_RE.sub('', string.replace('\r\n', '\n').replace('\r', '\n'))
def panel(message, run_async=True):
message = normalize(str(message))
view = sublime.active_window().active_view()
if run_async:
sublime.set_timeout_async(
lambda: view.run_command("gs_display_panel", {"msg": message})
)
else:
view.run_command("gs_display_panel", {"msg": message})
def panel_append(message, run_async=True):
message = normalize(str(message))
view = sublime.active_window().active_view()
if run_async:
sublime.set_timeout_async(
lambda: view.run_command("gs_append_panel", {"msg": message})
)
else:
view.run_command("gs_append_panel", {"msg": message})
|
a610faf9d64c062ed2dd44a818acc0d12d1f6e0b | django_evolution/compat/picklers.py | django_evolution/compat/picklers.py | """Picklers for working with serialized data."""
from __future__ import unicode_literals
import pickle
class DjangoCompatUnpickler(pickle.Unpickler):
    """Unpickler that tolerates Django module-path changes.

    Field classes have moved between modules across Django releases. This
    unpickler redirects module lookups so that pickled references resolve
    on every supported Django version.
    """

    def find_class(self, module, name):
        """Resolve and return the class named ``name`` in ``module``.

        References to ``django.db.models.fields`` are redirected to
        ``django.db.models``, fixing field lookups on some Django versions.

        Args:
            module (unicode):
                The module path recorded in the pickle stream.

            name (unicode):
                The class name to resolve.

        Returns:
            type:
            The resolved class.

        Raises:
            AttributeError:
                The class could not be found in the module.
        """
        target = module

        if target == 'django.db.models.fields':
            target = 'django.db.models'

        return super(DjangoCompatUnpickler, self).find_class(target, name)
| """Picklers for working with serialized data."""
from __future__ import unicode_literals
import pickle
from django_evolution.compat.datastructures import OrderedDict
class SortedDict(dict):
    """Compatibility for unpickling a SortedDict.

    Old signatures may use an old Django ``SortedDict`` structure, which does
    not exist in modern versions. This changes any construction of this
    data structure into a :py:class:`collections.OrderedDict`.
    """

    def __new__(cls, *args, **kwargs):
        """Construct an instance of the class.

        Args:
            *args (tuple):
                Positional arguments to pass to the constructor.

            **kwargs (dict):
                Keyword arguments to pass to the constructor.

        Returns:
            collections.OrderedDict:
            The new instance.
        """
        # Delegate allocation to OrderedDict so that unpickled SortedDict
        # references come back as an ordered mapping.
        # NOTE(review): this relies on OrderedDict.__new__ accepting this
        # dict subclass -- confirm on all supported Python versions.
        return OrderedDict.__new__(cls, *args, **kwargs)
class DjangoCompatUnpickler(pickle._Unpickler):
    """Unpickler that tolerates Django class/module path changes.

    Class and module paths for field types have changed across Django
    releases. This unpickler redirects lookups to stable locations so that
    pickled references resolve on every supported Django version.
    """

    def find_class(self, module, name):
        """Resolve and return the class named ``name`` in ``module``.

        Lookups of Django's retired ``SortedDict`` are answered with the
        local :py:class:`SortedDict` shim, and references to
        ``django.db.models.fields`` are redirected to ``django.db.models``,
        fixing field lookups on some Django versions.

        Args:
            module (unicode):
                The module path recorded in the pickle stream.

            name (unicode):
                The class name to resolve.

        Returns:
            type:
            The resolved class.

        Raises:
            AttributeError:
                The class could not be found in the module.
        """
        if (module, name) == ('django.utils.datastructures', 'SortedDict'):
            # Django no longer ships SortedDict; substitute the shim above.
            return SortedDict

        target = module

        if target == 'django.db.models.fields':
            target = 'django.db.models'

        return super(DjangoCompatUnpickler, self).find_class(target, name)
| Support loading pickled data referencing SortedDict. | Support loading pickled data referencing SortedDict.
Django used to provide a class called `SortedDict`, which has long been
deprecated in favor of Python's own `OrderedDict`. However, due to the
way that pickling works, older signatures would still attempt to loading
a `SortedDict` class.
This change adds a compatibility mechanism for this. Upon finding an
attempt to load a `SortedDict`, we instead give it a forwarding object
that's compatible with the unpickle code that constructs a new
`OrderedDict`. It's a bit hacky, in that we need to have this class that
subclasses `dict`, overrides `__new__`, and then returns an entirely
different object, but it's a necessity for the compatibility.
Testing Done:
Successfully loaded an older signature on Python 3.7/Django 1.11.
Reviewed at https://reviews.reviewboard.org/r/10557/
| Python | bsd-3-clause | beanbaginc/django-evolution | """Picklers for working with serialized data."""
from __future__ import unicode_literals
import pickle
class DjangoCompatUnpickler(pickle.Unpickler):
"""Unpickler compatible with changes to Django class/module paths.
This provides compatibility across Django versions for various field types,
updating referenced module paths for fields to a standard location so
that the fields can be located on all Django versions.
"""
def find_class(self, module, name):
"""Return the class for a given module and class name.
If looking up a class from ``django.db.models.fields``, the class will
instead be looked up from ``django.db.models``, fixing lookups on
some Django versions.
Args:
module (unicode):
The module path.
name (unicode):
The class name.
Returns:
type:
The resulting class.
Raises:
AttributeError:
The class could not be found in the module.
"""
if module == 'django.db.models.fields':
module = 'django.db.models'
return super(DjangoCompatUnpickler, self).find_class(module, name)
Support loading pickled data referencing SortedDict.
Django used to provide a class called `SortedDict`, which has long been
deprecated in favor of Python's own `OrderedDict`. However, due to the
way that pickling works, older signatures would still attempt to load
a `SortedDict` class.
This change adds a compatibility mechanism for this. Upon finding an
attempt to load a `SortedDict`, we instead give it a forwarding object
that's compatible with the unpickle code that constructs a new
`OrderedDict`. It's a bit hacky, in that we need to have this class that
subclasses `dict`, overrides `__new__`, and then returns an entirely
different object, but it's a necessity for the compatibility.
Testing Done:
Successfully loaded an older signature on Python 3.7/Django 1.11.
Reviewed at https://reviews.reviewboard.org/r/10557/ | """Picklers for working with serialized data."""
from __future__ import unicode_literals
import pickle
from django_evolution.compat.datastructures import OrderedDict
class SortedDict(dict):
"""Compatibility for unpickling a SortedDict.
Old signatures may use an old Django ``SortedDict`` structure, which does
not exist in modern versions. This changes any construction of this
data structure into a :py:class:`collections.OrderedDict`.
"""
def __new__(cls, *args, **kwargs):
"""Construct an instance of the class.
Args:
*args (tuple):
Positional arguments to pass to the constructor.
**kwargs (dict):
Keyword arguments to pass to the constructor.
Returns:
collections.OrderedDict:
The new instance.
"""
return OrderedDict.__new__(cls, *args, **kwargs)
class DjangoCompatUnpickler(pickle._Unpickler):
"""Unpickler compatible with changes to Django class/module paths.
This provides compatibility across Django versions for various field types,
updating referenced module paths for fields to a standard location so
that the fields can be located on all Django versions.
"""
def find_class(self, module, name):
"""Return the class for a given module and class name.
If looking up a class from ``django.db.models.fields``, the class will
instead be looked up from ``django.db.models``, fixing lookups on
some Django versions.
Args:
module (unicode):
The module path.
name (unicode):
The class name.
Returns:
type:
The resulting class.
Raises:
AttributeError:
The class could not be found in the module.
"""
if module == 'django.utils.datastructures' and name == 'SortedDict':
return SortedDict
elif module == 'django.db.models.fields':
module = 'django.db.models'
return super(DjangoCompatUnpickler, self).find_class(module, name)
| <commit_before>"""Picklers for working with serialized data."""
from __future__ import unicode_literals
import pickle
class DjangoCompatUnpickler(pickle.Unpickler):
"""Unpickler compatible with changes to Django class/module paths.
This provides compatibility across Django versions for various field types,
updating referenced module paths for fields to a standard location so
that the fields can be located on all Django versions.
"""
def find_class(self, module, name):
"""Return the class for a given module and class name.
If looking up a class from ``django.db.models.fields``, the class will
instead be looked up from ``django.db.models``, fixing lookups on
some Django versions.
Args:
module (unicode):
The module path.
name (unicode):
The class name.
Returns:
type:
The resulting class.
Raises:
AttributeError:
The class could not be found in the module.
"""
if module == 'django.db.models.fields':
module = 'django.db.models'
return super(DjangoCompatUnpickler, self).find_class(module, name)
<commit_msg>Support loading pickled data referencing SortedDict.
Django used to provide a class called `SortedDict`, which has long been
deprecated in favor of Python's own `OrderedDict`. However, due to the
way that pickling works, older signatures would still attempt to load
a `SortedDict` class.
This change adds a compatibility mechanism for this. Upon finding an
attempt to load a `SortedDict`, we instead give it a forwarding object
that's compatible with the unpickle code that constructs a new
`OrderedDict`. It's a bit hacky, in that we need to have this class that
subclasses `dict`, overrides `__new__`, and then returns an entirely
different object, but it's a necessity for the compatibility.
Testing Done:
Successfully loaded an older signature on Python 3.7/Django 1.11.
Reviewed at https://reviews.reviewboard.org/r/10557/<commit_after> | """Picklers for working with serialized data."""
from __future__ import unicode_literals
import pickle
from django_evolution.compat.datastructures import OrderedDict
class SortedDict(dict):
"""Compatibility for unpickling a SortedDict.
Old signatures may use an old Django ``SortedDict`` structure, which does
not exist in modern versions. This changes any construction of this
data structure into a :py:class:`collections.OrderedDict`.
"""
def __new__(cls, *args, **kwargs):
"""Construct an instance of the class.
Args:
*args (tuple):
Positional arguments to pass to the constructor.
**kwargs (dict):
Keyword arguments to pass to the constructor.
Returns:
collections.OrderedDict:
The new instance.
"""
return OrderedDict.__new__(cls, *args, **kwargs)
class DjangoCompatUnpickler(pickle._Unpickler):
"""Unpickler compatible with changes to Django class/module paths.
This provides compatibility across Django versions for various field types,
updating referenced module paths for fields to a standard location so
that the fields can be located on all Django versions.
"""
def find_class(self, module, name):
"""Return the class for a given module and class name.
If looking up a class from ``django.db.models.fields``, the class will
instead be looked up from ``django.db.models``, fixing lookups on
some Django versions.
Args:
module (unicode):
The module path.
name (unicode):
The class name.
Returns:
type:
The resulting class.
Raises:
AttributeError:
The class could not be found in the module.
"""
if module == 'django.utils.datastructures' and name == 'SortedDict':
return SortedDict
elif module == 'django.db.models.fields':
module = 'django.db.models'
return super(DjangoCompatUnpickler, self).find_class(module, name)
| """Picklers for working with serialized data."""
from __future__ import unicode_literals
import pickle
class DjangoCompatUnpickler(pickle.Unpickler):
"""Unpickler compatible with changes to Django class/module paths.
This provides compatibility across Django versions for various field types,
updating referenced module paths for fields to a standard location so
that the fields can be located on all Django versions.
"""
def find_class(self, module, name):
"""Return the class for a given module and class name.
If looking up a class from ``django.db.models.fields``, the class will
instead be looked up from ``django.db.models``, fixing lookups on
some Django versions.
Args:
module (unicode):
The module path.
name (unicode):
The class name.
Returns:
type:
The resulting class.
Raises:
AttributeError:
The class could not be found in the module.
"""
if module == 'django.db.models.fields':
module = 'django.db.models'
return super(DjangoCompatUnpickler, self).find_class(module, name)
Support loading pickled data referencing SortedDict.
Django used to provide a class called `SortedDict`, which has long been
deprecated in favor of Python's own `OrderedDict`. However, due to the
way that pickling works, older signatures would still attempt to load
a `SortedDict` class.
This change adds a compatibility mechanism for this. Upon finding an
attempt to load a `SortedDict`, we instead give it a forwarding object
that's compatible with the unpickle code that constructs a new
`OrderedDict`. It's a bit hacky, in that we need to have this class that
subclasses `dict`, overrides `__new__`, and then returns an entirely
different object, but it's a necessity for the compatibility.
Testing Done:
Successfully loaded an older signature on Python 3.7/Django 1.11.
Reviewed at https://reviews.reviewboard.org/r/10557/"""Picklers for working with serialized data."""
from __future__ import unicode_literals
import pickle
from django_evolution.compat.datastructures import OrderedDict
class SortedDict(dict):
"""Compatibility for unpickling a SortedDict.
Old signatures may use an old Django ``SortedDict`` structure, which does
not exist in modern versions. This changes any construction of this
data structure into a :py:class:`collections.OrderedDict`.
"""
def __new__(cls, *args, **kwargs):
"""Construct an instance of the class.
Args:
*args (tuple):
Positional arguments to pass to the constructor.
**kwargs (dict):
Keyword arguments to pass to the constructor.
Returns:
collections.OrderedDict:
The new instance.
"""
return OrderedDict.__new__(cls, *args, **kwargs)
class DjangoCompatUnpickler(pickle._Unpickler):
"""Unpickler compatible with changes to Django class/module paths.
This provides compatibility across Django versions for various field types,
updating referenced module paths for fields to a standard location so
that the fields can be located on all Django versions.
"""
def find_class(self, module, name):
"""Return the class for a given module and class name.
If looking up a class from ``django.db.models.fields``, the class will
instead be looked up from ``django.db.models``, fixing lookups on
some Django versions.
Args:
module (unicode):
The module path.
name (unicode):
The class name.
Returns:
type:
The resulting class.
Raises:
AttributeError:
The class could not be found in the module.
"""
if module == 'django.utils.datastructures' and name == 'SortedDict':
return SortedDict
elif module == 'django.db.models.fields':
module = 'django.db.models'
return super(DjangoCompatUnpickler, self).find_class(module, name)
| <commit_before>"""Picklers for working with serialized data."""
from __future__ import unicode_literals
import pickle
class DjangoCompatUnpickler(pickle.Unpickler):
"""Unpickler compatible with changes to Django class/module paths.
This provides compatibility across Django versions for various field types,
updating referenced module paths for fields to a standard location so
that the fields can be located on all Django versions.
"""
def find_class(self, module, name):
"""Return the class for a given module and class name.
If looking up a class from ``django.db.models.fields``, the class will
instead be looked up from ``django.db.models``, fixing lookups on
some Django versions.
Args:
module (unicode):
The module path.
name (unicode):
The class name.
Returns:
type:
The resulting class.
Raises:
AttributeError:
The class could not be found in the module.
"""
if module == 'django.db.models.fields':
module = 'django.db.models'
return super(DjangoCompatUnpickler, self).find_class(module, name)
<commit_msg>Support loading pickled data referencing SortedDict.
Django used to provide a class called `SortedDict`, which has long been
deprecated in favor of Python's own `OrderedDict`. However, due to the
way that pickling works, older signatures would still attempt to loading
a `SortedDict` class.
This change adds a compatibility mechanism for this. Upon finding an
attempt to load a `SortedDict`, we instead give it a forwarding object
that's compatible with the unpickle code that constructs a new
`OrderedDict`. It's a bit hacky, in that we need to have this class that
subclasses `dict`, overrides `__new__`, and then returns an entirely
different object, but it's a necessity for the compatibility.
Testing Done:
Successfully loaded an older signature on Python 3.7/Django 1.11.
Reviewed at https://reviews.reviewboard.org/r/10557/<commit_after>"""Picklers for working with serialized data."""
from __future__ import unicode_literals
import pickle
from django_evolution.compat.datastructures import OrderedDict
class SortedDict(dict):
"""Compatibility for unpickling a SortedDict.
Old signatures may use an old Django ``SortedDict`` structure, which does
not exist in modern versions. This changes any construction of this
data structure into a :py:class:`collections.OrderedDict`.
"""
def __new__(cls, *args, **kwargs):
"""Construct an instance of the class.
Args:
*args (tuple):
Positional arguments to pass to the constructor.
**kwargs (dict):
Keyword arguments to pass to the constructor.
Returns:
collections.OrderedDict:
The new instance.
"""
return OrderedDict.__new__(cls, *args, **kwargs)
class DjangoCompatUnpickler(pickle._Unpickler):
"""Unpickler compatible with changes to Django class/module paths.
This provides compatibility across Django versions for various field types,
updating referenced module paths for fields to a standard location so
that the fields can be located on all Django versions.
"""
def find_class(self, module, name):
"""Return the class for a given module and class name.
If looking up a class from ``django.db.models.fields``, the class will
instead be looked up from ``django.db.models``, fixing lookups on
some Django versions.
Args:
module (unicode):
The module path.
name (unicode):
The class name.
Returns:
type:
The resulting class.
Raises:
AttributeError:
The class could not be found in the module.
"""
if module == 'django.utils.datastructures' and name == 'SortedDict':
return SortedDict
elif module == 'django.db.models.fields':
module = 'django.db.models'
return super(DjangoCompatUnpickler, self).find_class(module, name)
|
4640b75fdb794e29cb6e7bdc03a6697d8f9f3483 | emu/processes/wps_ultimate_question.py | emu/processes/wps_ultimate_question.py | from pywps import Process, LiteralOutput
from pywps.app.Common import Metadata
class UltimateQuestion(Process):
def __init__(self):
inputs = []
outputs = [LiteralOutput('answer', 'Answer to Ultimate Question', data_type='string')]
super(UltimateQuestion, self).__init__(
self._handler,
identifier='ultimate_question',
version='2.0',
title='Answer to the ultimate question',
abstract='This process gives the answer to the ultimate question of "What is the meaning of life?"',
profile='',
metadata=[Metadata('Ultimate Question'), Metadata('What is the meaning of life')],
inputs=inputs,
outputs=outputs,
store_supported=True,
status_supported=True
)
@staticmethod
def _handler(request, response):
import time
response.update_status('PyWPS Process started.', 0)
sleep_delay = .1
time.sleep(sleep_delay)
response.update_status('Thinking...', 20)
time.sleep(sleep_delay)
response.update_status('Thinking...', 40)
time.sleep(sleep_delay)
response.update_status('Thinking...', 60)
time.sleep(sleep_delay)
response.update_status('Thinking...', 80)
response.outputs['answer'].data = '42'
response.update_status('PyWPS Process completed.', 100)
return response
| from pywps import Process, LiteralOutput
from pywps.app.Common import Metadata
class UltimateQuestion(Process):
def __init__(self):
inputs = []
outputs = [LiteralOutput('answer', 'Answer to Ultimate Question', data_type='string')]
super(UltimateQuestion, self).__init__(
self._handler,
identifier='ultimate_question',
version='2.0',
title='Answer to the ultimate question',
abstract='This process gives the answer to the ultimate question of life, the universe, and everything.',
profile='',
metadata=[Metadata('Ultimate Question'), Metadata('What is the meaning of life')],
inputs=inputs,
outputs=outputs,
store_supported=True,
status_supported=True
)
@staticmethod
def _handler(request, response):
import time
sleep_delay = .1
response.update_status('PyWPS Process started.', 0)
time.sleep(sleep_delay)
response.update_status("Contacting the Deep Thought supercomputer.", 10)
time.sleep(sleep_delay)
response.update_status('Thinking...', 20)
time.sleep(sleep_delay)
response.update_status('Thinking...', 40)
time.sleep(sleep_delay)
response.update_status('Thinking...', 60)
time.sleep(sleep_delay)
response.update_status('Thinking...', 80)
response.outputs['answer'].data = '42'
response.update_status('PyWPS Process completed.', 100)
return response
| Make ultimate question ever more ultimate | Make ultimate question ever more ultimate
| Python | apache-2.0 | bird-house/emu | from pywps import Process, LiteralOutput
from pywps.app.Common import Metadata
class UltimateQuestion(Process):
def __init__(self):
inputs = []
outputs = [LiteralOutput('answer', 'Answer to Ultimate Question', data_type='string')]
super(UltimateQuestion, self).__init__(
self._handler,
identifier='ultimate_question',
version='2.0',
title='Answer to the ultimate question',
abstract='This process gives the answer to the ultimate question of "What is the meaning of life?"',
profile='',
metadata=[Metadata('Ultimate Question'), Metadata('What is the meaning of life')],
inputs=inputs,
outputs=outputs,
store_supported=True,
status_supported=True
)
@staticmethod
def _handler(request, response):
import time
response.update_status('PyWPS Process started.', 0)
sleep_delay = .1
time.sleep(sleep_delay)
response.update_status('Thinking...', 20)
time.sleep(sleep_delay)
response.update_status('Thinking...', 40)
time.sleep(sleep_delay)
response.update_status('Thinking...', 60)
time.sleep(sleep_delay)
response.update_status('Thinking...', 80)
response.outputs['answer'].data = '42'
response.update_status('PyWPS Process completed.', 100)
return response
Make ultimate question ever more ultimate | from pywps import Process, LiteralOutput
from pywps.app.Common import Metadata
class UltimateQuestion(Process):
def __init__(self):
inputs = []
outputs = [LiteralOutput('answer', 'Answer to Ultimate Question', data_type='string')]
super(UltimateQuestion, self).__init__(
self._handler,
identifier='ultimate_question',
version='2.0',
title='Answer to the ultimate question',
abstract='This process gives the answer to the ultimate question of life, the universe, and everything.',
profile='',
metadata=[Metadata('Ultimate Question'), Metadata('What is the meaning of life')],
inputs=inputs,
outputs=outputs,
store_supported=True,
status_supported=True
)
@staticmethod
def _handler(request, response):
import time
sleep_delay = .1
response.update_status('PyWPS Process started.', 0)
time.sleep(sleep_delay)
response.update_status("Contacting the Deep Thought supercomputer.", 10)
time.sleep(sleep_delay)
response.update_status('Thinking...', 20)
time.sleep(sleep_delay)
response.update_status('Thinking...', 40)
time.sleep(sleep_delay)
response.update_status('Thinking...', 60)
time.sleep(sleep_delay)
response.update_status('Thinking...', 80)
response.outputs['answer'].data = '42'
response.update_status('PyWPS Process completed.', 100)
return response
| <commit_before>from pywps import Process, LiteralOutput
from pywps.app.Common import Metadata
class UltimateQuestion(Process):
def __init__(self):
inputs = []
outputs = [LiteralOutput('answer', 'Answer to Ultimate Question', data_type='string')]
super(UltimateQuestion, self).__init__(
self._handler,
identifier='ultimate_question',
version='2.0',
title='Answer to the ultimate question',
abstract='This process gives the answer to the ultimate question of "What is the meaning of life?"',
profile='',
metadata=[Metadata('Ultimate Question'), Metadata('What is the meaning of life')],
inputs=inputs,
outputs=outputs,
store_supported=True,
status_supported=True
)
@staticmethod
def _handler(request, response):
import time
response.update_status('PyWPS Process started.', 0)
sleep_delay = .1
time.sleep(sleep_delay)
response.update_status('Thinking...', 20)
time.sleep(sleep_delay)
response.update_status('Thinking...', 40)
time.sleep(sleep_delay)
response.update_status('Thinking...', 60)
time.sleep(sleep_delay)
response.update_status('Thinking...', 80)
response.outputs['answer'].data = '42'
response.update_status('PyWPS Process completed.', 100)
return response
<commit_msg>Make ultimate question ever more ultimate<commit_after> | from pywps import Process, LiteralOutput
from pywps.app.Common import Metadata
class UltimateQuestion(Process):
def __init__(self):
inputs = []
outputs = [LiteralOutput('answer', 'Answer to Ultimate Question', data_type='string')]
super(UltimateQuestion, self).__init__(
self._handler,
identifier='ultimate_question',
version='2.0',
title='Answer to the ultimate question',
abstract='This process gives the answer to the ultimate question of life, the universe, and everything.',
profile='',
metadata=[Metadata('Ultimate Question'), Metadata('What is the meaning of life')],
inputs=inputs,
outputs=outputs,
store_supported=True,
status_supported=True
)
@staticmethod
def _handler(request, response):
import time
sleep_delay = .1
response.update_status('PyWPS Process started.', 0)
time.sleep(sleep_delay)
response.update_status("Contacting the Deep Thought supercomputer.", 10)
time.sleep(sleep_delay)
response.update_status('Thinking...', 20)
time.sleep(sleep_delay)
response.update_status('Thinking...', 40)
time.sleep(sleep_delay)
response.update_status('Thinking...', 60)
time.sleep(sleep_delay)
response.update_status('Thinking...', 80)
response.outputs['answer'].data = '42'
response.update_status('PyWPS Process completed.', 100)
return response
| from pywps import Process, LiteralOutput
from pywps.app.Common import Metadata
class UltimateQuestion(Process):
def __init__(self):
inputs = []
outputs = [LiteralOutput('answer', 'Answer to Ultimate Question', data_type='string')]
super(UltimateQuestion, self).__init__(
self._handler,
identifier='ultimate_question',
version='2.0',
title='Answer to the ultimate question',
abstract='This process gives the answer to the ultimate question of "What is the meaning of life?"',
profile='',
metadata=[Metadata('Ultimate Question'), Metadata('What is the meaning of life')],
inputs=inputs,
outputs=outputs,
store_supported=True,
status_supported=True
)
@staticmethod
def _handler(request, response):
import time
response.update_status('PyWPS Process started.', 0)
sleep_delay = .1
time.sleep(sleep_delay)
response.update_status('Thinking...', 20)
time.sleep(sleep_delay)
response.update_status('Thinking...', 40)
time.sleep(sleep_delay)
response.update_status('Thinking...', 60)
time.sleep(sleep_delay)
response.update_status('Thinking...', 80)
response.outputs['answer'].data = '42'
response.update_status('PyWPS Process completed.', 100)
return response
Make ultimate question ever more ultimatefrom pywps import Process, LiteralOutput
from pywps.app.Common import Metadata
class UltimateQuestion(Process):
def __init__(self):
inputs = []
outputs = [LiteralOutput('answer', 'Answer to Ultimate Question', data_type='string')]
super(UltimateQuestion, self).__init__(
self._handler,
identifier='ultimate_question',
version='2.0',
title='Answer to the ultimate question',
abstract='This process gives the answer to the ultimate question of life, the universe, and everything.',
profile='',
metadata=[Metadata('Ultimate Question'), Metadata('What is the meaning of life')],
inputs=inputs,
outputs=outputs,
store_supported=True,
status_supported=True
)
@staticmethod
def _handler(request, response):
import time
sleep_delay = .1
response.update_status('PyWPS Process started.', 0)
time.sleep(sleep_delay)
response.update_status("Contacting the Deep Thought supercomputer.", 10)
time.sleep(sleep_delay)
response.update_status('Thinking...', 20)
time.sleep(sleep_delay)
response.update_status('Thinking...', 40)
time.sleep(sleep_delay)
response.update_status('Thinking...', 60)
time.sleep(sleep_delay)
response.update_status('Thinking...', 80)
response.outputs['answer'].data = '42'
response.update_status('PyWPS Process completed.', 100)
return response
| <commit_before>from pywps import Process, LiteralOutput
from pywps.app.Common import Metadata
class UltimateQuestion(Process):
def __init__(self):
inputs = []
outputs = [LiteralOutput('answer', 'Answer to Ultimate Question', data_type='string')]
super(UltimateQuestion, self).__init__(
self._handler,
identifier='ultimate_question',
version='2.0',
title='Answer to the ultimate question',
abstract='This process gives the answer to the ultimate question of "What is the meaning of life?"',
profile='',
metadata=[Metadata('Ultimate Question'), Metadata('What is the meaning of life')],
inputs=inputs,
outputs=outputs,
store_supported=True,
status_supported=True
)
@staticmethod
def _handler(request, response):
import time
response.update_status('PyWPS Process started.', 0)
sleep_delay = .1
time.sleep(sleep_delay)
response.update_status('Thinking...', 20)
time.sleep(sleep_delay)
response.update_status('Thinking...', 40)
time.sleep(sleep_delay)
response.update_status('Thinking...', 60)
time.sleep(sleep_delay)
response.update_status('Thinking...', 80)
response.outputs['answer'].data = '42'
response.update_status('PyWPS Process completed.', 100)
return response
<commit_msg>Make ultimate question ever more ultimate<commit_after>from pywps import Process, LiteralOutput
from pywps.app.Common import Metadata
class UltimateQuestion(Process):
def __init__(self):
inputs = []
outputs = [LiteralOutput('answer', 'Answer to Ultimate Question', data_type='string')]
super(UltimateQuestion, self).__init__(
self._handler,
identifier='ultimate_question',
version='2.0',
title='Answer to the ultimate question',
abstract='This process gives the answer to the ultimate question of life, the universe, and everything.',
profile='',
metadata=[Metadata('Ultimate Question'), Metadata('What is the meaning of life')],
inputs=inputs,
outputs=outputs,
store_supported=True,
status_supported=True
)
@staticmethod
def _handler(request, response):
import time
sleep_delay = .1
response.update_status('PyWPS Process started.', 0)
time.sleep(sleep_delay)
response.update_status("Contacting the Deep Thought supercomputer.", 10)
time.sleep(sleep_delay)
response.update_status('Thinking...', 20)
time.sleep(sleep_delay)
response.update_status('Thinking...', 40)
time.sleep(sleep_delay)
response.update_status('Thinking...', 60)
time.sleep(sleep_delay)
response.update_status('Thinking...', 80)
response.outputs['answer'].data = '42'
response.update_status('PyWPS Process completed.', 100)
return response
|
bf0043ac102cc9eddf03c8db493ae1a985c6a30a | src/nyc_trees/apps/home/urls.py | src/nyc_trees/apps/home/urls.py | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.conf.urls import patterns, url, include
from apps.home import routes as r
urlpatterns = patterns(
'',
url(r'^$', r.home_page, name='home_page'),
url(r'^progress/$', r.progress_page, name='progress_page'),
url(r'^jobs/(?P<job_id>\d+)/$', r.retrieve_job_status,
name='retrieve_job_status'),
url(r'^training/$', r.training_list_page, name="training_list_page"),
url(r'^training/groups_to_follow/$', r.groups_to_follow),
# "training" instead of "training/" because the flatpages admin interface
# insists that the "URL" (really a URL segment) start with a leading slash
url(r'^training', include('django.contrib.flatpages.urls')),
)
| # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.conf.urls import patterns, url, include
from apps.home import routes as r
urlpatterns = patterns(
'',
url(r'^$', r.home_page, name='home_page'),
url(r'^progress/$', r.progress_page, name='progress_page'),
url(r'^jobs/(?P<job_id>\d+)/$', r.retrieve_job_status,
name='retrieve_job_status'),
url(r'^training/$', r.training_list_page, name="training_list_page"),
url(r'^training/groups_to_follow/$', r.groups_to_follow),
# "training" instead of "training/" because the flatpages admin interface
# insists that the "URL" (really a URL segment) start with a leading slash
url(r'^training(?P<url>.*/)$', include('django.contrib.flatpages.urls')),
)
| Fix trailing slash 404 for flat pages and co | Fix trailing slash 404 for flat pages and co
By modifying the URL, flatpage requests without a trailing slash will
always fail, triggering the redirect provided by `APPEND_SLASH`.
This is important because urls that share a common endpoint path were
404ing on a flatpage not found when not constructed with a slash.
| Python | agpl-3.0 | azavea/nyc-trees,maurizi/nyc-trees,azavea/nyc-trees,kdeloach/nyc-trees,kdeloach/nyc-trees,maurizi/nyc-trees,azavea/nyc-trees,maurizi/nyc-trees,kdeloach/nyc-trees,RickMohr/nyc-trees,azavea/nyc-trees,RickMohr/nyc-trees,kdeloach/nyc-trees,RickMohr/nyc-trees,azavea/nyc-trees,maurizi/nyc-trees,kdeloach/nyc-trees,RickMohr/nyc-trees | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.conf.urls import patterns, url, include
from apps.home import routes as r
urlpatterns = patterns(
'',
url(r'^$', r.home_page, name='home_page'),
url(r'^progress/$', r.progress_page, name='progress_page'),
url(r'^jobs/(?P<job_id>\d+)/$', r.retrieve_job_status,
name='retrieve_job_status'),
url(r'^training/$', r.training_list_page, name="training_list_page"),
url(r'^training/groups_to_follow/$', r.groups_to_follow),
# "training" instead of "training/" because the flatpages admin interface
# insists that the "URL" (really a URL segment) start with a leading slash
url(r'^training', include('django.contrib.flatpages.urls')),
)
Fix trailing slash 404 for flat pages and co
By modifying the URL, flatpage requests without a trailing slash will
always fail, triggering the redirect provided by `APPEND_SLASH`.
This is important because urls that share a common endpoint path were
404ing on a flatpage not found when not constructed with a slash. | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.conf.urls import patterns, url, include
from apps.home import routes as r
urlpatterns = patterns(
'',
url(r'^$', r.home_page, name='home_page'),
url(r'^progress/$', r.progress_page, name='progress_page'),
url(r'^jobs/(?P<job_id>\d+)/$', r.retrieve_job_status,
name='retrieve_job_status'),
url(r'^training/$', r.training_list_page, name="training_list_page"),
url(r'^training/groups_to_follow/$', r.groups_to_follow),
# "training" instead of "training/" because the flatpages admin interface
# insists that the "URL" (really a URL segment) start with a leading slash
url(r'^training(?P<url>.*/)$', include('django.contrib.flatpages.urls')),
)
| <commit_before># -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.conf.urls import patterns, url, include
from apps.home import routes as r
urlpatterns = patterns(
'',
url(r'^$', r.home_page, name='home_page'),
url(r'^progress/$', r.progress_page, name='progress_page'),
url(r'^jobs/(?P<job_id>\d+)/$', r.retrieve_job_status,
name='retrieve_job_status'),
url(r'^training/$', r.training_list_page, name="training_list_page"),
url(r'^training/groups_to_follow/$', r.groups_to_follow),
# "training" instead of "training/" because the flatpages admin interface
# insists that the "URL" (really a URL segment) start with a leading slash
url(r'^training', include('django.contrib.flatpages.urls')),
)
<commit_msg>Fix trailing slash 404 for flat pages and co
By modifying the URL, flatpage requests without a trailing slash will
always fail, triggering the redirect provided by `APPEND_SLASH`.
This is important because urls that share a common endpoint path were
404ing on a flatpage not found when not constructed with a slash.<commit_after> | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.conf.urls import patterns, url, include
from apps.home import routes as r
urlpatterns = patterns(
'',
url(r'^$', r.home_page, name='home_page'),
url(r'^progress/$', r.progress_page, name='progress_page'),
url(r'^jobs/(?P<job_id>\d+)/$', r.retrieve_job_status,
name='retrieve_job_status'),
url(r'^training/$', r.training_list_page, name="training_list_page"),
url(r'^training/groups_to_follow/$', r.groups_to_follow),
# "training" instead of "training/" because the flatpages admin interface
# insists that the "URL" (really a URL segment) start with a leading slash
url(r'^training(?P<url>.*/)$', include('django.contrib.flatpages.urls')),
)
| # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.conf.urls import patterns, url, include
from apps.home import routes as r
urlpatterns = patterns(
'',
url(r'^$', r.home_page, name='home_page'),
url(r'^progress/$', r.progress_page, name='progress_page'),
url(r'^jobs/(?P<job_id>\d+)/$', r.retrieve_job_status,
name='retrieve_job_status'),
url(r'^training/$', r.training_list_page, name="training_list_page"),
url(r'^training/groups_to_follow/$', r.groups_to_follow),
# "training" instead of "training/" because the flatpages admin interface
# insists that the "URL" (really a URL segment) start with a leading slash
url(r'^training', include('django.contrib.flatpages.urls')),
)
Fix trailing slash 404 for flat pages and co
By modifying the URL, flatpage requests without a trailing slash will
always fail, triggering the redirect provided by `APPEND_SLASH`.
This is important because urls that share a common endpoint path were
404ing on a flatpage not found when not constructed with a slash.# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.conf.urls import patterns, url, include
from apps.home import routes as r
urlpatterns = patterns(
'',
url(r'^$', r.home_page, name='home_page'),
url(r'^progress/$', r.progress_page, name='progress_page'),
url(r'^jobs/(?P<job_id>\d+)/$', r.retrieve_job_status,
name='retrieve_job_status'),
url(r'^training/$', r.training_list_page, name="training_list_page"),
url(r'^training/groups_to_follow/$', r.groups_to_follow),
# "training" instead of "training/" because the flatpages admin interface
# insists that the "URL" (really a URL segment) start with a leading slash
url(r'^training(?P<url>.*/)$', include('django.contrib.flatpages.urls')),
)
| <commit_before># -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.conf.urls import patterns, url, include
from apps.home import routes as r
urlpatterns = patterns(
'',
url(r'^$', r.home_page, name='home_page'),
url(r'^progress/$', r.progress_page, name='progress_page'),
url(r'^jobs/(?P<job_id>\d+)/$', r.retrieve_job_status,
name='retrieve_job_status'),
url(r'^training/$', r.training_list_page, name="training_list_page"),
url(r'^training/groups_to_follow/$', r.groups_to_follow),
# "training" instead of "training/" because the flatpages admin interface
# insists that the "URL" (really a URL segment) start with a leading slash
url(r'^training', include('django.contrib.flatpages.urls')),
)
<commit_msg>Fix trailing slash 404 for flat pages and co
By modifying the URL, flatpage requests without a trailing slash will
always fail, triggering the redirect provided by `APPEND_SLASH`.
This is important because urls that share a common endpoint path were
404ing on a flatpage not found when not constructed with a slash.<commit_after># -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.conf.urls import patterns, url, include
from apps.home import routes as r
urlpatterns = patterns(
'',
url(r'^$', r.home_page, name='home_page'),
url(r'^progress/$', r.progress_page, name='progress_page'),
url(r'^jobs/(?P<job_id>\d+)/$', r.retrieve_job_status,
name='retrieve_job_status'),
url(r'^training/$', r.training_list_page, name="training_list_page"),
url(r'^training/groups_to_follow/$', r.groups_to_follow),
# "training" instead of "training/" because the flatpages admin interface
# insists that the "URL" (really a URL segment) start with a leading slash
url(r'^training(?P<url>.*/)$', include('django.contrib.flatpages.urls')),
)
|
cebe1c3b72eb9e0fd4114d5664e269a73bdc06a1 | examples/many_pairwise_correlations.py | examples/many_pairwise_correlations.py | """
Plotting a diagonal correlation matrix
======================================
_thumb: .3, .6
"""
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white")
# Generate a large random dataset
rs = np.random.RandomState(33)
d = rs.normal(size=(30, 100))
# Compute the correlation matrix
corr = np.corrcoef(d)
# Generate a mask for the upper triangle
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3,
square=True, xticklabels=5, yticklabels=5,
linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
| """
Plotting a diagonal correlation matrix
======================================
_thumb: .3, .6
"""
from string import letters
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white")
# Generate a large random dataset
rs = np.random.RandomState(33)
d = pd.DataFrame(data=rs.normal(size=(100, 26)),
columns=list(letters[:26]))
# Compute the correlation matrix
corr = d.corr()
# Generate a mask for the upper triangle
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3,
square=True, xticklabels=5, yticklabels=5,
linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
| Use dataframe to show how semantic information is used | Use dataframe to show how semantic information is used
| Python | bsd-3-clause | mia1rab/seaborn,muku42/seaborn,JWarmenhoven/seaborn,mwaskom/seaborn,drewokane/seaborn,jat255/seaborn,phobson/seaborn,bsipocz/seaborn,uhjish/seaborn,arokem/seaborn,tim777z/seaborn,q1ang/seaborn,clarkfitzg/seaborn,dhimmel/seaborn,olgabot/seaborn,Lx37/seaborn,anntzer/seaborn,kyleam/seaborn,oesteban/seaborn,sinhrks/seaborn,parantapa/seaborn,ashhher3/seaborn,wrobstory/seaborn,mclevey/seaborn,anntzer/seaborn,lypzln/seaborn,sauliusl/seaborn,petebachant/seaborn,gef756/seaborn,mwaskom/seaborn,lukauskas/seaborn,huongttlan/seaborn,phobson/seaborn,nileracecrew/seaborn,lukauskas/seaborn,dimarkov/seaborn,arokem/seaborn | """
Plotting a diagonal correlation matrix
======================================
_thumb: .3, .6
"""
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white")
# Generate a large random dataset
rs = np.random.RandomState(33)
d = rs.normal(size=(30, 100))
# Compute the correlation matrix
corr = np.corrcoef(d)
# Generate a mask for the upper triangle
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3,
square=True, xticklabels=5, yticklabels=5,
linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
Use dataframe to show how semantic information is used | """
Plotting a diagonal correlation matrix
======================================
_thumb: .3, .6
"""
from string import letters
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white")
# Generate a large random dataset
rs = np.random.RandomState(33)
d = pd.DataFrame(data=rs.normal(size=(100, 26)),
columns=list(letters[:26]))
# Compute the correlation matrix
corr = d.corr()
# Generate a mask for the upper triangle
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3,
square=True, xticklabels=5, yticklabels=5,
linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
| <commit_before>"""
Plotting a diagonal correlation matrix
======================================
_thumb: .3, .6
"""
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white")
# Generate a large random dataset
rs = np.random.RandomState(33)
d = rs.normal(size=(30, 100))
# Compute the correlation matrix
corr = np.corrcoef(d)
# Generate a mask for the upper triangle
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3,
square=True, xticklabels=5, yticklabels=5,
linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
<commit_msg>Use dataframe to show how semantic information is used<commit_after> | """
Plotting a diagonal correlation matrix
======================================
_thumb: .3, .6
"""
from string import letters
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white")
# Generate a large random dataset
rs = np.random.RandomState(33)
d = pd.DataFrame(data=rs.normal(size=(100, 26)),
columns=list(letters[:26]))
# Compute the correlation matrix
corr = d.corr()
# Generate a mask for the upper triangle
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3,
square=True, xticklabels=5, yticklabels=5,
linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
| """
Plotting a diagonal correlation matrix
======================================
_thumb: .3, .6
"""
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white")
# Generate a large random dataset
rs = np.random.RandomState(33)
d = rs.normal(size=(30, 100))
# Compute the correlation matrix
corr = np.corrcoef(d)
# Generate a mask for the upper triangle
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3,
square=True, xticklabels=5, yticklabels=5,
linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
Use dataframe to show how semantic information is used"""
Plotting a diagonal correlation matrix
======================================
_thumb: .3, .6
"""
from string import letters
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white")
# Generate a large random dataset
rs = np.random.RandomState(33)
d = pd.DataFrame(data=rs.normal(size=(100, 26)),
columns=list(letters[:26]))
# Compute the correlation matrix
corr = d.corr()
# Generate a mask for the upper triangle
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3,
square=True, xticklabels=5, yticklabels=5,
linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
| <commit_before>"""
Plotting a diagonal correlation matrix
======================================
_thumb: .3, .6
"""
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white")
# Generate a large random dataset
rs = np.random.RandomState(33)
d = rs.normal(size=(30, 100))
# Compute the correlation matrix
corr = np.corrcoef(d)
# Generate a mask for the upper triangle
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3,
square=True, xticklabels=5, yticklabels=5,
linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
<commit_msg>Use dataframe to show how semantic information is used<commit_after>"""
Plotting a diagonal correlation matrix
======================================
_thumb: .3, .6
"""
from string import letters
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white")
# Generate a large random dataset
rs = np.random.RandomState(33)
d = pd.DataFrame(data=rs.normal(size=(100, 26)),
columns=list(letters[:26]))
# Compute the correlation matrix
corr = d.corr()
# Generate a mask for the upper triangle
mask = np.zeros_like(corr, dtype=np.bool)
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3,
square=True, xticklabels=5, yticklabels=5,
linewidths=.5, cbar_kws={"shrink": .5}, ax=ax)
|
5fe71408c740697ebe0b268e9fbac93f5932ef63 | froide/foirequest/search_indexes.py | froide/foirequest/search_indexes.py | from haystack import indexes
from haystack import site
from foirequest.models import FoiRequest
class FoiRequestIndex(indexes.SearchIndex):
text = indexes.EdgeNgramField(document=True, use_template=True)
title = indexes.CharField(model_attr='title')
description = indexes.CharField(model_attr='description')
status = indexes.CharField(model_attr='status')
first_message = indexes.DateTimeField(model_attr='first_message')
last_message = indexes.DateTimeField(model_attr='last_message')
url = indexes.CharField(model_attr='get_absolute_url')
def get_queryset(self):
"""Used when the entire index for model is updated."""
return FoiRequest.objects.get_for_search_index()
site.register(FoiRequest, FoiRequestIndex)
| from haystack import indexes
from haystack import site
from foirequest.models import FoiRequest
class FoiRequestIndex(indexes.SearchIndex):
text = indexes.EdgeNgramField(document=True, use_template=True)
title = indexes.CharField(model_attr='title')
description = indexes.CharField(model_attr='description')
summary = indexes.CharField(model_attr='summary')
status = indexes.CharField(model_attr='status')
first_message = indexes.DateTimeField(model_attr='first_message')
last_message = indexes.DateTimeField(model_attr='last_message')
url = indexes.CharField(model_attr='get_absolute_url')
def get_queryset(self):
"""Used when the entire index for model is updated."""
return FoiRequest.objects.get_for_search_index()
site.register(FoiRequest, FoiRequestIndex)
| Add summary to foirequest search | Add summary to foirequest search | Python | mit | ryankanno/froide,stefanw/froide,LilithWittmann/froide,LilithWittmann/froide,stefanw/froide,fin/froide,fin/froide,stefanw/froide,catcosmo/froide,stefanw/froide,CodeforHawaii/froide,okfse/froide,LilithWittmann/froide,ryankanno/froide,catcosmo/froide,CodeforHawaii/froide,ryankanno/froide,catcosmo/froide,ryankanno/froide,catcosmo/froide,fin/froide,CodeforHawaii/froide,okfse/froide,okfse/froide,LilithWittmann/froide,okfse/froide,fin/froide,catcosmo/froide,CodeforHawaii/froide,okfse/froide,CodeforHawaii/froide,stefanw/froide,LilithWittmann/froide,ryankanno/froide | from haystack import indexes
from haystack import site
from foirequest.models import FoiRequest
class FoiRequestIndex(indexes.SearchIndex):
text = indexes.EdgeNgramField(document=True, use_template=True)
title = indexes.CharField(model_attr='title')
description = indexes.CharField(model_attr='description')
status = indexes.CharField(model_attr='status')
first_message = indexes.DateTimeField(model_attr='first_message')
last_message = indexes.DateTimeField(model_attr='last_message')
url = indexes.CharField(model_attr='get_absolute_url')
def get_queryset(self):
"""Used when the entire index for model is updated."""
return FoiRequest.objects.get_for_search_index()
site.register(FoiRequest, FoiRequestIndex)
Add summary to foirequest search | from haystack import indexes
from haystack import site
from foirequest.models import FoiRequest
class FoiRequestIndex(indexes.SearchIndex):
text = indexes.EdgeNgramField(document=True, use_template=True)
title = indexes.CharField(model_attr='title')
description = indexes.CharField(model_attr='description')
summary = indexes.CharField(model_attr='summary')
status = indexes.CharField(model_attr='status')
first_message = indexes.DateTimeField(model_attr='first_message')
last_message = indexes.DateTimeField(model_attr='last_message')
url = indexes.CharField(model_attr='get_absolute_url')
def get_queryset(self):
"""Used when the entire index for model is updated."""
return FoiRequest.objects.get_for_search_index()
site.register(FoiRequest, FoiRequestIndex)
| <commit_before>from haystack import indexes
from haystack import site
from foirequest.models import FoiRequest
class FoiRequestIndex(indexes.SearchIndex):
text = indexes.EdgeNgramField(document=True, use_template=True)
title = indexes.CharField(model_attr='title')
description = indexes.CharField(model_attr='description')
status = indexes.CharField(model_attr='status')
first_message = indexes.DateTimeField(model_attr='first_message')
last_message = indexes.DateTimeField(model_attr='last_message')
url = indexes.CharField(model_attr='get_absolute_url')
def get_queryset(self):
"""Used when the entire index for model is updated."""
return FoiRequest.objects.get_for_search_index()
site.register(FoiRequest, FoiRequestIndex)
<commit_msg>Add summary to foirequest search<commit_after> | from haystack import indexes
from haystack import site
from foirequest.models import FoiRequest
class FoiRequestIndex(indexes.SearchIndex):
text = indexes.EdgeNgramField(document=True, use_template=True)
title = indexes.CharField(model_attr='title')
description = indexes.CharField(model_attr='description')
summary = indexes.CharField(model_attr='summary')
status = indexes.CharField(model_attr='status')
first_message = indexes.DateTimeField(model_attr='first_message')
last_message = indexes.DateTimeField(model_attr='last_message')
url = indexes.CharField(model_attr='get_absolute_url')
def get_queryset(self):
"""Used when the entire index for model is updated."""
return FoiRequest.objects.get_for_search_index()
site.register(FoiRequest, FoiRequestIndex)
| from haystack import indexes
from haystack import site
from foirequest.models import FoiRequest
class FoiRequestIndex(indexes.SearchIndex):
text = indexes.EdgeNgramField(document=True, use_template=True)
title = indexes.CharField(model_attr='title')
description = indexes.CharField(model_attr='description')
status = indexes.CharField(model_attr='status')
first_message = indexes.DateTimeField(model_attr='first_message')
last_message = indexes.DateTimeField(model_attr='last_message')
url = indexes.CharField(model_attr='get_absolute_url')
def get_queryset(self):
"""Used when the entire index for model is updated."""
return FoiRequest.objects.get_for_search_index()
site.register(FoiRequest, FoiRequestIndex)
Add summary to foirequest searchfrom haystack import indexes
from haystack import site
from foirequest.models import FoiRequest
class FoiRequestIndex(indexes.SearchIndex):
text = indexes.EdgeNgramField(document=True, use_template=True)
title = indexes.CharField(model_attr='title')
description = indexes.CharField(model_attr='description')
summary = indexes.CharField(model_attr='summary')
status = indexes.CharField(model_attr='status')
first_message = indexes.DateTimeField(model_attr='first_message')
last_message = indexes.DateTimeField(model_attr='last_message')
url = indexes.CharField(model_attr='get_absolute_url')
def get_queryset(self):
"""Used when the entire index for model is updated."""
return FoiRequest.objects.get_for_search_index()
site.register(FoiRequest, FoiRequestIndex)
| <commit_before>from haystack import indexes
from haystack import site
from foirequest.models import FoiRequest
class FoiRequestIndex(indexes.SearchIndex):
text = indexes.EdgeNgramField(document=True, use_template=True)
title = indexes.CharField(model_attr='title')
description = indexes.CharField(model_attr='description')
status = indexes.CharField(model_attr='status')
first_message = indexes.DateTimeField(model_attr='first_message')
last_message = indexes.DateTimeField(model_attr='last_message')
url = indexes.CharField(model_attr='get_absolute_url')
def get_queryset(self):
"""Used when the entire index for model is updated."""
return FoiRequest.objects.get_for_search_index()
site.register(FoiRequest, FoiRequestIndex)
<commit_msg>Add summary to foirequest search<commit_after>from haystack import indexes
from haystack import site
from foirequest.models import FoiRequest
class FoiRequestIndex(indexes.SearchIndex):
text = indexes.EdgeNgramField(document=True, use_template=True)
title = indexes.CharField(model_attr='title')
description = indexes.CharField(model_attr='description')
summary = indexes.CharField(model_attr='summary')
status = indexes.CharField(model_attr='status')
first_message = indexes.DateTimeField(model_attr='first_message')
last_message = indexes.DateTimeField(model_attr='last_message')
url = indexes.CharField(model_attr='get_absolute_url')
def get_queryset(self):
"""Used when the entire index for model is updated."""
return FoiRequest.objects.get_for_search_index()
site.register(FoiRequest, FoiRequestIndex)
|
99aac92ca2a4958b7daff7b64d52c0e58db3554c | opal/tests/test_core_views.py | opal/tests/test_core_views.py | """
Unittests for opal.core.views
"""
from opal.core import test
from opal.core import views
class SerializerTestCase(test.OpalTestCase):
def test_serializer_default_will_super(self):
s = views.OpalSerializer()
with self.assertRaises(TypeError):
s.default(None)
| """
Unittests for opal.core.views
"""
import warnings
from opal.core import test
from opal.core import views
class SerializerTestCase(test.OpalTestCase):
def test_serializer_default_will_super(self):
s = views.OpalSerializer()
with self.assertRaises(TypeError):
s.default(None)
class BuildJSONResponseTestCase(test.OpalTestCase):
def test_underscore_spelling_warns(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
r = views._build_json_response({})
self.assertEqual(200, r.status_code)
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "will be removed" in str(w[-1].message)
| Add test for warn spelling of build_json_response | Add test for warn spelling of build_json_response
| Python | agpl-3.0 | khchine5/opal,khchine5/opal,khchine5/opal | """
Unittests for opal.core.views
"""
from opal.core import test
from opal.core import views
class SerializerTestCase(test.OpalTestCase):
def test_serializer_default_will_super(self):
s = views.OpalSerializer()
with self.assertRaises(TypeError):
s.default(None)
Add test for warn spelling of build_json_response | """
Unittests for opal.core.views
"""
import warnings
from opal.core import test
from opal.core import views
class SerializerTestCase(test.OpalTestCase):
def test_serializer_default_will_super(self):
s = views.OpalSerializer()
with self.assertRaises(TypeError):
s.default(None)
class BuildJSONResponseTestCase(test.OpalTestCase):
def test_underscore_spelling_warns(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
r = views._build_json_response({})
self.assertEqual(200, r.status_code)
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "will be removed" in str(w[-1].message)
| <commit_before>"""
Unittests for opal.core.views
"""
from opal.core import test
from opal.core import views
class SerializerTestCase(test.OpalTestCase):
def test_serializer_default_will_super(self):
s = views.OpalSerializer()
with self.assertRaises(TypeError):
s.default(None)
<commit_msg>Add test for warn spelling of build_json_response<commit_after> | """
Unittests for opal.core.views
"""
import warnings
from opal.core import test
from opal.core import views
class SerializerTestCase(test.OpalTestCase):
def test_serializer_default_will_super(self):
s = views.OpalSerializer()
with self.assertRaises(TypeError):
s.default(None)
class BuildJSONResponseTestCase(test.OpalTestCase):
def test_underscore_spelling_warns(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
r = views._build_json_response({})
self.assertEqual(200, r.status_code)
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "will be removed" in str(w[-1].message)
| """
Unittests for opal.core.views
"""
from opal.core import test
from opal.core import views
class SerializerTestCase(test.OpalTestCase):
def test_serializer_default_will_super(self):
s = views.OpalSerializer()
with self.assertRaises(TypeError):
s.default(None)
Add test for warn spelling of build_json_response"""
Unittests for opal.core.views
"""
import warnings
from opal.core import test
from opal.core import views
class SerializerTestCase(test.OpalTestCase):
def test_serializer_default_will_super(self):
s = views.OpalSerializer()
with self.assertRaises(TypeError):
s.default(None)
class BuildJSONResponseTestCase(test.OpalTestCase):
def test_underscore_spelling_warns(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
r = views._build_json_response({})
self.assertEqual(200, r.status_code)
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "will be removed" in str(w[-1].message)
| <commit_before>"""
Unittests for opal.core.views
"""
from opal.core import test
from opal.core import views
class SerializerTestCase(test.OpalTestCase):
def test_serializer_default_will_super(self):
s = views.OpalSerializer()
with self.assertRaises(TypeError):
s.default(None)
<commit_msg>Add test for warn spelling of build_json_response<commit_after>"""
Unittests for opal.core.views
"""
import warnings
from opal.core import test
from opal.core import views
class SerializerTestCase(test.OpalTestCase):
def test_serializer_default_will_super(self):
s = views.OpalSerializer()
with self.assertRaises(TypeError):
s.default(None)
class BuildJSONResponseTestCase(test.OpalTestCase):
def test_underscore_spelling_warns(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
r = views._build_json_response({})
self.assertEqual(200, r.status_code)
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "will be removed" in str(w[-1].message)
|
452bad78228f5cb26c0ed9cc7aa48497f1d3f7f0 | apps/tasks/signals.py | apps/tasks/signals.py | import logging
import django.dispatch
from django.dispatch import receiver
logger = logging.getLogger(__name__)
__all__ = ['task_status_changed', 'celery_request_status_update']
task_status_changed = django.dispatch.Signal(providing_args=['instance', ])
celery_request_status_update = django.dispatch.Signal(providing_args=['task_id', 'job_id', 'status'])
@receiver(celery_request_status_update)
def on_task_manager_request_status_update(sender, task_id, job_id, status, **kwargs):
logger.debug('on_task_manager_request_status_update {} {}'.format(job_id, status))
from tasks.models import Task, TaskStatus
task = Task.objects.get(pk=task_id)
if task is None:
logger.warning('Could not find task #{}'.format(task_id))
return
sparrow_status = TaskStatus.from_celery_status(status)
logger.debug('Task #{} received status update from celery ({}): status = {}'.format(
task_id, job_id, TaskStatus.label(sparrow_status)))
task.status = sparrow_status
task.save()
| import logging
import django.dispatch
from django.dispatch import receiver
import requests
from common.utils import build_absolute_uri
logger = logging.getLogger(__name__)
__all__ = ['task_status_changed', 'celery_request_status_update']
task_status_changed = django.dispatch.Signal(providing_args=['instance', ])
celery_request_status_update = django.dispatch.Signal(providing_args=['task_id', 'job_id', 'status'])
@receiver(celery_request_status_update)
def on_task_manager_request_status_update(sender, task_id, job_id, status, **kwargs):
logger.debug('on_task_manager_request_status_update {} {}'.format(job_id, status))
from tasks.models import TaskStatus
sparrow_status = TaskStatus.from_celery_status(status)
logger.debug('Task #{} received status update from celery ({}): status = {}'.format(
task_id, job_id, TaskStatus.label(sparrow_status)))
task_url = build_absolute_uri('/api/v1/tasks/{}/'.format(task_id))
response = requests.patch(task_url, json={
'status': sparrow_status
})
response.raise_for_status()
| Update status using API instead of direct db access | Update status using API instead of direct db access
| Python | mit | Wikia/sparrow,harnash/sparrow,Wikia/sparrow,Wikia/sparrow | import logging
import django.dispatch
from django.dispatch import receiver
logger = logging.getLogger(__name__)
__all__ = ['task_status_changed', 'celery_request_status_update']
task_status_changed = django.dispatch.Signal(providing_args=['instance', ])
celery_request_status_update = django.dispatch.Signal(providing_args=['task_id', 'job_id', 'status'])
@receiver(celery_request_status_update)
def on_task_manager_request_status_update(sender, task_id, job_id, status, **kwargs):
logger.debug('on_task_manager_request_status_update {} {}'.format(job_id, status))
from tasks.models import Task, TaskStatus
task = Task.objects.get(pk=task_id)
if task is None:
logger.warning('Could not find task #{}'.format(task_id))
return
sparrow_status = TaskStatus.from_celery_status(status)
logger.debug('Task #{} received status update from celery ({}): status = {}'.format(
task_id, job_id, TaskStatus.label(sparrow_status)))
task.status = sparrow_status
task.save()
Update status using API instead of direct db access | import logging
import django.dispatch
from django.dispatch import receiver
import requests
from common.utils import build_absolute_uri
logger = logging.getLogger(__name__)
__all__ = ['task_status_changed', 'celery_request_status_update']
task_status_changed = django.dispatch.Signal(providing_args=['instance', ])
celery_request_status_update = django.dispatch.Signal(providing_args=['task_id', 'job_id', 'status'])
@receiver(celery_request_status_update)
def on_task_manager_request_status_update(sender, task_id, job_id, status, **kwargs):
logger.debug('on_task_manager_request_status_update {} {}'.format(job_id, status))
from tasks.models import TaskStatus
sparrow_status = TaskStatus.from_celery_status(status)
logger.debug('Task #{} received status update from celery ({}): status = {}'.format(
task_id, job_id, TaskStatus.label(sparrow_status)))
task_url = build_absolute_uri('/api/v1/tasks/{}/'.format(task_id))
response = requests.patch(task_url, json={
'status': sparrow_status
})
response.raise_for_status()
| <commit_before>import logging
import django.dispatch
from django.dispatch import receiver
logger = logging.getLogger(__name__)
__all__ = ['task_status_changed', 'celery_request_status_update']
task_status_changed = django.dispatch.Signal(providing_args=['instance', ])
celery_request_status_update = django.dispatch.Signal(providing_args=['task_id', 'job_id', 'status'])
@receiver(celery_request_status_update)
def on_task_manager_request_status_update(sender, task_id, job_id, status, **kwargs):
logger.debug('on_task_manager_request_status_update {} {}'.format(job_id, status))
from tasks.models import Task, TaskStatus
task = Task.objects.get(pk=task_id)
if task is None:
logger.warning('Could not find task #{}'.format(task_id))
return
sparrow_status = TaskStatus.from_celery_status(status)
logger.debug('Task #{} received status update from celery ({}): status = {}'.format(
task_id, job_id, TaskStatus.label(sparrow_status)))
task.status = sparrow_status
task.save()
<commit_msg>Update status using API instead of direct db access<commit_after> | import logging
import django.dispatch
from django.dispatch import receiver
import requests
from common.utils import build_absolute_uri
logger = logging.getLogger(__name__)
__all__ = ['task_status_changed', 'celery_request_status_update']
task_status_changed = django.dispatch.Signal(providing_args=['instance', ])
celery_request_status_update = django.dispatch.Signal(providing_args=['task_id', 'job_id', 'status'])
@receiver(celery_request_status_update)
def on_task_manager_request_status_update(sender, task_id, job_id, status, **kwargs):
logger.debug('on_task_manager_request_status_update {} {}'.format(job_id, status))
from tasks.models import TaskStatus
sparrow_status = TaskStatus.from_celery_status(status)
logger.debug('Task #{} received status update from celery ({}): status = {}'.format(
task_id, job_id, TaskStatus.label(sparrow_status)))
task_url = build_absolute_uri('/api/v1/tasks/{}/'.format(task_id))
response = requests.patch(task_url, json={
'status': sparrow_status
})
response.raise_for_status()
| import logging
import django.dispatch
from django.dispatch import receiver
logger = logging.getLogger(__name__)
__all__ = ['task_status_changed', 'celery_request_status_update']
task_status_changed = django.dispatch.Signal(providing_args=['instance', ])
celery_request_status_update = django.dispatch.Signal(providing_args=['task_id', 'job_id', 'status'])
@receiver(celery_request_status_update)
def on_task_manager_request_status_update(sender, task_id, job_id, status, **kwargs):
logger.debug('on_task_manager_request_status_update {} {}'.format(job_id, status))
from tasks.models import Task, TaskStatus
task = Task.objects.get(pk=task_id)
if task is None:
logger.warning('Could not find task #{}'.format(task_id))
return
sparrow_status = TaskStatus.from_celery_status(status)
logger.debug('Task #{} received status update from celery ({}): status = {}'.format(
task_id, job_id, TaskStatus.label(sparrow_status)))
task.status = sparrow_status
task.save()
Update status using API instead of direct db accessimport logging
import django.dispatch
from django.dispatch import receiver
import requests
from common.utils import build_absolute_uri
logger = logging.getLogger(__name__)
__all__ = ['task_status_changed', 'celery_request_status_update']
task_status_changed = django.dispatch.Signal(providing_args=['instance', ])
celery_request_status_update = django.dispatch.Signal(providing_args=['task_id', 'job_id', 'status'])
@receiver(celery_request_status_update)
def on_task_manager_request_status_update(sender, task_id, job_id, status, **kwargs):
logger.debug('on_task_manager_request_status_update {} {}'.format(job_id, status))
from tasks.models import TaskStatus
sparrow_status = TaskStatus.from_celery_status(status)
logger.debug('Task #{} received status update from celery ({}): status = {}'.format(
task_id, job_id, TaskStatus.label(sparrow_status)))
task_url = build_absolute_uri('/api/v1/tasks/{}/'.format(task_id))
response = requests.patch(task_url, json={
'status': sparrow_status
})
response.raise_for_status()
| <commit_before>import logging
import django.dispatch
from django.dispatch import receiver
logger = logging.getLogger(__name__)
__all__ = ['task_status_changed', 'celery_request_status_update']
task_status_changed = django.dispatch.Signal(providing_args=['instance', ])
celery_request_status_update = django.dispatch.Signal(providing_args=['task_id', 'job_id', 'status'])
@receiver(celery_request_status_update)
def on_task_manager_request_status_update(sender, task_id, job_id, status, **kwargs):
logger.debug('on_task_manager_request_status_update {} {}'.format(job_id, status))
from tasks.models import Task, TaskStatus
task = Task.objects.get(pk=task_id)
if task is None:
logger.warning('Could not find task #{}'.format(task_id))
return
sparrow_status = TaskStatus.from_celery_status(status)
logger.debug('Task #{} received status update from celery ({}): status = {}'.format(
task_id, job_id, TaskStatus.label(sparrow_status)))
task.status = sparrow_status
task.save()
<commit_msg>Update status using API instead of direct db access<commit_after>import logging
import django.dispatch
from django.dispatch import receiver
import requests
from common.utils import build_absolute_uri
logger = logging.getLogger(__name__)
__all__ = ['task_status_changed', 'celery_request_status_update']
task_status_changed = django.dispatch.Signal(providing_args=['instance', ])
celery_request_status_update = django.dispatch.Signal(providing_args=['task_id', 'job_id', 'status'])
@receiver(celery_request_status_update)
def on_task_manager_request_status_update(sender, task_id, job_id, status, **kwargs):
logger.debug('on_task_manager_request_status_update {} {}'.format(job_id, status))
from tasks.models import TaskStatus
sparrow_status = TaskStatus.from_celery_status(status)
logger.debug('Task #{} received status update from celery ({}): status = {}'.format(
task_id, job_id, TaskStatus.label(sparrow_status)))
task_url = build_absolute_uri('/api/v1/tasks/{}/'.format(task_id))
response = requests.patch(task_url, json={
'status': sparrow_status
})
response.raise_for_status()
|
057ddbf6ee529fff9f511a9cc8bc8c1a20a91833 | helpers.py | helpers.py | import os
from collections import namedtuple
Response = namedtuple('Response', 'command_status message')
# Allows use of `environ_or_none("foo") or "default"` shorthand
# noinspection PyBroadException
def environ_or_none(key):
try:
return os.environ[key]
except:
return None
# Checks that all items in a pattern-matching product name are unique
def all_matches_unique(match):
return len(match[0][1::2]) == len(set(match[0][1::2]))
| import os
from collections import namedtuple
Response = namedtuple('Response', 'command_status message')
# Allows use of `environ_or_none("foo") or "default"` shorthand
# noinspection PyBroadException
def environ_or_none(key):
try:
return os.environ[key]
except:
return None
# Checks that all items in a pattern-matching product name are unique
def all_matches_unique(match):
return len(match[0][1::2]) == len(set(match[0][1::2]))
| Add another blank line (thanks flake) --autopull | Add another blank line (thanks flake) --autopull | Python | apache-2.0 | Charcoal-SE/SmokeDetector,Charcoal-SE/SmokeDetector | import os
from collections import namedtuple
Response = namedtuple('Response', 'command_status message')
# Allows use of `environ_or_none("foo") or "default"` shorthand
# noinspection PyBroadException
def environ_or_none(key):
try:
return os.environ[key]
except:
return None
# Checks that all items in a pattern-matching product name are unique
def all_matches_unique(match):
return len(match[0][1::2]) == len(set(match[0][1::2]))
Add another blank line (thanks flake) --autopull | import os
from collections import namedtuple
Response = namedtuple('Response', 'command_status message')
# Allows use of `environ_or_none("foo") or "default"` shorthand
# noinspection PyBroadException
def environ_or_none(key):
try:
return os.environ[key]
except:
return None
# Checks that all items in a pattern-matching product name are unique
def all_matches_unique(match):
return len(match[0][1::2]) == len(set(match[0][1::2]))
| <commit_before>import os
from collections import namedtuple
Response = namedtuple('Response', 'command_status message')
# Allows use of `environ_or_none("foo") or "default"` shorthand
# noinspection PyBroadException
def environ_or_none(key):
try:
return os.environ[key]
except:
return None
# Checks that all items in a pattern-matching product name are unique
def all_matches_unique(match):
return len(match[0][1::2]) == len(set(match[0][1::2]))
<commit_msg>Add another blank line (thanks flake) --autopull<commit_after> | import os
from collections import namedtuple
Response = namedtuple('Response', 'command_status message')
# Allows use of `environ_or_none("foo") or "default"` shorthand
# noinspection PyBroadException
def environ_or_none(key):
try:
return os.environ[key]
except:
return None
# Checks that all items in a pattern-matching product name are unique
def all_matches_unique(match):
return len(match[0][1::2]) == len(set(match[0][1::2]))
| import os
from collections import namedtuple
Response = namedtuple('Response', 'command_status message')
# Allows use of `environ_or_none("foo") or "default"` shorthand
# noinspection PyBroadException
def environ_or_none(key):
try:
return os.environ[key]
except:
return None
# Checks that all items in a pattern-matching product name are unique
def all_matches_unique(match):
return len(match[0][1::2]) == len(set(match[0][1::2]))
Add another blank line (thanks flake) --autopullimport os
from collections import namedtuple
Response = namedtuple('Response', 'command_status message')
# Allows use of `environ_or_none("foo") or "default"` shorthand
# noinspection PyBroadException
def environ_or_none(key):
try:
return os.environ[key]
except:
return None
# Checks that all items in a pattern-matching product name are unique
def all_matches_unique(match):
return len(match[0][1::2]) == len(set(match[0][1::2]))
| <commit_before>import os
from collections import namedtuple
Response = namedtuple('Response', 'command_status message')
# Allows use of `environ_or_none("foo") or "default"` shorthand
# noinspection PyBroadException
def environ_or_none(key):
try:
return os.environ[key]
except:
return None
# Checks that all items in a pattern-matching product name are unique
def all_matches_unique(match):
return len(match[0][1::2]) == len(set(match[0][1::2]))
<commit_msg>Add another blank line (thanks flake) --autopull<commit_after>import os
from collections import namedtuple
Response = namedtuple('Response', 'command_status message')
# Allows use of `environ_or_none("foo") or "default"` shorthand
# noinspection PyBroadException
def environ_or_none(key):
try:
return os.environ[key]
except:
return None
# Checks that all items in a pattern-matching product name are unique
def all_matches_unique(match):
return len(match[0][1::2]) == len(set(match[0][1::2]))
|
a0e432b0ac31ed74256197b1d5df8b6f8a0987db | product/models.py | product/models.py | from django.db import models
from django.utils.translation import pgettext as _
from django_prices.models import PriceField
from satchless.util.models import Subtyped
from satchless.item import ItemRange
from mptt.models import MPTTModel
class Category(MPTTModel):
name = models.CharField(_('Category field', 'name'), max_length=128)
slug = models.SlugField(_('Category field', 'slug'), max_length=50,
unique=True)
description = models.TextField(_('Category field', 'description'),
blank=True)
parent = models.ForeignKey('self', null=True, related_name='children',
blank=True,
verbose_name=_('Category field', 'parent'))
def __unicode__(self):
return self.name
class Product(Subtyped, ItemRange):
name = models.CharField(_('Product field', 'name'), max_length=128)
slug = models.SlugField(_('Product field', 'slug'), max_length=50,
unique=True)
price = PriceField(_('Product field', 'price'), currency='USD',
max_digits=12, decimal_places=4)
category = models.ForeignKey(Category,
verbose_name=_('Product field', 'category'))
def __unicode__(self):
return self.name
| from django.db import models
from django.utils.safestring import mark_safe
from django.utils.translation import pgettext as _
from django_prices.models import PriceField
from mptt.models import MPTTModel
from satchless.item import ItemRange
from satchless.util.models import Subtyped
from unidecode import unidecode
import re
class Category(MPTTModel):
name = models.CharField(_('Category field', 'name'), max_length=128)
slug = models.SlugField(_('Category field', 'slug'), max_length=50,
unique=True)
description = models.TextField(_('Category field', 'description'),
blank=True)
parent = models.ForeignKey('self', null=True, related_name='children',
blank=True,
verbose_name=_('Category field', 'parent'))
def __unicode__(self):
return self.name
class Product(Subtyped, ItemRange):
name = models.CharField(_('Product field', 'name'), max_length=128)
price = PriceField(_('Product field', 'price'), currency='USD',
max_digits=12, decimal_places=4)
category = models.ForeignKey(Category,
verbose_name=_('Product field', 'category'))
def __unicode__(self):
return self.name
def get_slug(self):
value = unidecode(self.name)
value = re.sub('[^\w\s-]', '', value).strip().lower()
return mark_safe(re.sub('[-\s]+', '-', value))
@models.permalink
def get_absolute_url(self):
return ('product:details', [self.get_slug(), self.id])
| Replace slug field with get_slug function | Replace slug field with get_slug function
| Python | bsd-3-clause | laosunhust/saleor,mociepka/saleor,paweltin/saleor,mociepka/saleor,jreigel/saleor,taedori81/saleor,UITools/saleor,UITools/saleor,spartonia/saleor,car3oon/saleor,Drekscott/Motlaesaleor,UITools/saleor,HyperManTT/ECommerceSaleor,paweltin/saleor,maferelo/saleor,dashmug/saleor,rodrigozn/CW-Shop,laosunhust/saleor,avorio/saleor,hongquan/saleor,taedori81/saleor,paweltin/saleor,tfroehlich82/saleor,rodrigozn/CW-Shop,car3oon/saleor,arth-co/saleor,HyperManTT/ECommerceSaleor,mociepka/saleor,tfroehlich82/saleor,josesanch/saleor,arth-co/saleor,avorio/saleor,itbabu/saleor,HyperManTT/ECommerceSaleor,itbabu/saleor,arth-co/saleor,Drekscott/Motlaesaleor,rodrigozn/CW-Shop,jreigel/saleor,rchav/vinerack,taedori81/saleor,taedori81/saleor,avorio/saleor,spartonia/saleor,itbabu/saleor,avorio/saleor,paweltin/saleor,jreigel/saleor,KenMutemi/saleor,laosunhust/saleor,josesanch/saleor,UITools/saleor,dashmug/saleor,rchav/vinerack,KenMutemi/saleor,arth-co/saleor,KenMutemi/saleor,UITools/saleor,Drekscott/Motlaesaleor,maferelo/saleor,spartonia/saleor,car3oon/saleor,hongquan/saleor,laosunhust/saleor,Drekscott/Motlaesaleor,dashmug/saleor,rchav/vinerack,maferelo/saleor,josesanch/saleor,hongquan/saleor,tfroehlich82/saleor,spartonia/saleor | from django.db import models
from django.utils.translation import pgettext as _
from django_prices.models import PriceField
from satchless.util.models import Subtyped
from satchless.item import ItemRange
from mptt.models import MPTTModel
class Category(MPTTModel):
name = models.CharField(_('Category field', 'name'), max_length=128)
slug = models.SlugField(_('Category field', 'slug'), max_length=50,
unique=True)
description = models.TextField(_('Category field', 'description'),
blank=True)
parent = models.ForeignKey('self', null=True, related_name='children',
blank=True,
verbose_name=_('Category field', 'parent'))
def __unicode__(self):
return self.name
class Product(Subtyped, ItemRange):
name = models.CharField(_('Product field', 'name'), max_length=128)
slug = models.SlugField(_('Product field', 'slug'), max_length=50,
unique=True)
price = PriceField(_('Product field', 'price'), currency='USD',
max_digits=12, decimal_places=4)
category = models.ForeignKey(Category,
verbose_name=_('Product field', 'category'))
def __unicode__(self):
return self.name
Replace slug field with get_slug function | from django.db import models
from django.utils.safestring import mark_safe
from django.utils.translation import pgettext as _
from django_prices.models import PriceField
from mptt.models import MPTTModel
from satchless.item import ItemRange
from satchless.util.models import Subtyped
from unidecode import unidecode
import re
class Category(MPTTModel):
name = models.CharField(_('Category field', 'name'), max_length=128)
slug = models.SlugField(_('Category field', 'slug'), max_length=50,
unique=True)
description = models.TextField(_('Category field', 'description'),
blank=True)
parent = models.ForeignKey('self', null=True, related_name='children',
blank=True,
verbose_name=_('Category field', 'parent'))
def __unicode__(self):
return self.name
class Product(Subtyped, ItemRange):
name = models.CharField(_('Product field', 'name'), max_length=128)
price = PriceField(_('Product field', 'price'), currency='USD',
max_digits=12, decimal_places=4)
category = models.ForeignKey(Category,
verbose_name=_('Product field', 'category'))
def __unicode__(self):
return self.name
def get_slug(self):
value = unidecode(self.name)
value = re.sub('[^\w\s-]', '', value).strip().lower()
return mark_safe(re.sub('[-\s]+', '-', value))
@models.permalink
def get_absolute_url(self):
return ('product:details', [self.get_slug(), self.id])
| <commit_before>from django.db import models
from django.utils.translation import pgettext as _
from django_prices.models import PriceField
from satchless.util.models import Subtyped
from satchless.item import ItemRange
from mptt.models import MPTTModel
class Category(MPTTModel):
name = models.CharField(_('Category field', 'name'), max_length=128)
slug = models.SlugField(_('Category field', 'slug'), max_length=50,
unique=True)
description = models.TextField(_('Category field', 'description'),
blank=True)
parent = models.ForeignKey('self', null=True, related_name='children',
blank=True,
verbose_name=_('Category field', 'parent'))
def __unicode__(self):
return self.name
class Product(Subtyped, ItemRange):
name = models.CharField(_('Product field', 'name'), max_length=128)
slug = models.SlugField(_('Product field', 'slug'), max_length=50,
unique=True)
price = PriceField(_('Product field', 'price'), currency='USD',
max_digits=12, decimal_places=4)
category = models.ForeignKey(Category,
verbose_name=_('Product field', 'category'))
def __unicode__(self):
return self.name
<commit_msg>Replace slug field with get_slug function<commit_after> | from django.db import models
from django.utils.safestring import mark_safe
from django.utils.translation import pgettext as _
from django_prices.models import PriceField
from mptt.models import MPTTModel
from satchless.item import ItemRange
from satchless.util.models import Subtyped
from unidecode import unidecode
import re
class Category(MPTTModel):
name = models.CharField(_('Category field', 'name'), max_length=128)
slug = models.SlugField(_('Category field', 'slug'), max_length=50,
unique=True)
description = models.TextField(_('Category field', 'description'),
blank=True)
parent = models.ForeignKey('self', null=True, related_name='children',
blank=True,
verbose_name=_('Category field', 'parent'))
def __unicode__(self):
return self.name
class Product(Subtyped, ItemRange):
name = models.CharField(_('Product field', 'name'), max_length=128)
price = PriceField(_('Product field', 'price'), currency='USD',
max_digits=12, decimal_places=4)
category = models.ForeignKey(Category,
verbose_name=_('Product field', 'category'))
def __unicode__(self):
return self.name
def get_slug(self):
value = unidecode(self.name)
value = re.sub('[^\w\s-]', '', value).strip().lower()
return mark_safe(re.sub('[-\s]+', '-', value))
@models.permalink
def get_absolute_url(self):
return ('product:details', [self.get_slug(), self.id])
| from django.db import models
from django.utils.translation import pgettext as _
from django_prices.models import PriceField
from satchless.util.models import Subtyped
from satchless.item import ItemRange
from mptt.models import MPTTModel
class Category(MPTTModel):
name = models.CharField(_('Category field', 'name'), max_length=128)
slug = models.SlugField(_('Category field', 'slug'), max_length=50,
unique=True)
description = models.TextField(_('Category field', 'description'),
blank=True)
parent = models.ForeignKey('self', null=True, related_name='children',
blank=True,
verbose_name=_('Category field', 'parent'))
def __unicode__(self):
return self.name
class Product(Subtyped, ItemRange):
name = models.CharField(_('Product field', 'name'), max_length=128)
slug = models.SlugField(_('Product field', 'slug'), max_length=50,
unique=True)
price = PriceField(_('Product field', 'price'), currency='USD',
max_digits=12, decimal_places=4)
category = models.ForeignKey(Category,
verbose_name=_('Product field', 'category'))
def __unicode__(self):
return self.name
Replace slug field with get_slug functionfrom django.db import models
from django.utils.safestring import mark_safe
from django.utils.translation import pgettext as _
from django_prices.models import PriceField
from mptt.models import MPTTModel
from satchless.item import ItemRange
from satchless.util.models import Subtyped
from unidecode import unidecode
import re
class Category(MPTTModel):
name = models.CharField(_('Category field', 'name'), max_length=128)
slug = models.SlugField(_('Category field', 'slug'), max_length=50,
unique=True)
description = models.TextField(_('Category field', 'description'),
blank=True)
parent = models.ForeignKey('self', null=True, related_name='children',
blank=True,
verbose_name=_('Category field', 'parent'))
def __unicode__(self):
return self.name
class Product(Subtyped, ItemRange):
name = models.CharField(_('Product field', 'name'), max_length=128)
price = PriceField(_('Product field', 'price'), currency='USD',
max_digits=12, decimal_places=4)
category = models.ForeignKey(Category,
verbose_name=_('Product field', 'category'))
def __unicode__(self):
return self.name
def get_slug(self):
value = unidecode(self.name)
value = re.sub('[^\w\s-]', '', value).strip().lower()
return mark_safe(re.sub('[-\s]+', '-', value))
@models.permalink
def get_absolute_url(self):
return ('product:details', [self.get_slug(), self.id])
| <commit_before>from django.db import models
from django.utils.translation import pgettext as _
from django_prices.models import PriceField
from satchless.util.models import Subtyped
from satchless.item import ItemRange
from mptt.models import MPTTModel
class Category(MPTTModel):
name = models.CharField(_('Category field', 'name'), max_length=128)
slug = models.SlugField(_('Category field', 'slug'), max_length=50,
unique=True)
description = models.TextField(_('Category field', 'description'),
blank=True)
parent = models.ForeignKey('self', null=True, related_name='children',
blank=True,
verbose_name=_('Category field', 'parent'))
def __unicode__(self):
return self.name
class Product(Subtyped, ItemRange):
name = models.CharField(_('Product field', 'name'), max_length=128)
slug = models.SlugField(_('Product field', 'slug'), max_length=50,
unique=True)
price = PriceField(_('Product field', 'price'), currency='USD',
max_digits=12, decimal_places=4)
category = models.ForeignKey(Category,
verbose_name=_('Product field', 'category'))
def __unicode__(self):
return self.name
<commit_msg>Replace slug field with get_slug function<commit_after>from django.db import models
from django.utils.safestring import mark_safe
from django.utils.translation import pgettext as _
from django_prices.models import PriceField
from mptt.models import MPTTModel
from satchless.item import ItemRange
from satchless.util.models import Subtyped
from unidecode import unidecode
import re
class Category(MPTTModel):
name = models.CharField(_('Category field', 'name'), max_length=128)
slug = models.SlugField(_('Category field', 'slug'), max_length=50,
unique=True)
description = models.TextField(_('Category field', 'description'),
blank=True)
parent = models.ForeignKey('self', null=True, related_name='children',
blank=True,
verbose_name=_('Category field', 'parent'))
def __unicode__(self):
return self.name
class Product(Subtyped, ItemRange):
name = models.CharField(_('Product field', 'name'), max_length=128)
price = PriceField(_('Product field', 'price'), currency='USD',
max_digits=12, decimal_places=4)
category = models.ForeignKey(Category,
verbose_name=_('Product field', 'category'))
def __unicode__(self):
return self.name
def get_slug(self):
value = unidecode(self.name)
value = re.sub('[^\w\s-]', '', value).strip().lower()
return mark_safe(re.sub('[-\s]+', '-', value))
@models.permalink
def get_absolute_url(self):
return ('product:details', [self.get_slug(), self.id])
|
66a5c4199d3e8bbea5ed40e92fef4be60425f827 | pmxbot/dictlib.py | pmxbot/dictlib.py | import yaml
from jaraco.collections import ItemsAsAttributes
class ConfigDict(ItemsAsAttributes, dict):
@classmethod
def from_yaml(cls, filename):
with open(filename) as f:
return cls(yaml.load(f))
def to_yaml(self, filename):
dumper = getattr(yaml, 'danger_dump', yaml.dump)
with open(filename, 'w') as f:
dumper(self, f)
| import yaml
from jaraco.collections import ItemsAsAttributes
class ConfigDict(ItemsAsAttributes, dict):
@classmethod
def from_yaml(cls, filename):
with open(filename) as f:
return cls(yaml.safe_load(f))
def to_yaml(self, filename):
dumper = getattr(yaml, 'danger_dump', yaml.dump)
with open(filename, 'w') as f:
dumper(self, f)
| Use safe-load when loading configuration to avoid deprecation warning | Use safe-load when loading configuration to avoid deprecation warning
| Python | mit | yougov/pmxbot,yougov/pmxbot,yougov/pmxbot | import yaml
from jaraco.collections import ItemsAsAttributes
class ConfigDict(ItemsAsAttributes, dict):
@classmethod
def from_yaml(cls, filename):
with open(filename) as f:
return cls(yaml.load(f))
def to_yaml(self, filename):
dumper = getattr(yaml, 'danger_dump', yaml.dump)
with open(filename, 'w') as f:
dumper(self, f)
Use safe-load when loading configuration to avoid deprecation warning | import yaml
from jaraco.collections import ItemsAsAttributes
class ConfigDict(ItemsAsAttributes, dict):
@classmethod
def from_yaml(cls, filename):
with open(filename) as f:
return cls(yaml.safe_load(f))
def to_yaml(self, filename):
dumper = getattr(yaml, 'danger_dump', yaml.dump)
with open(filename, 'w') as f:
dumper(self, f)
| <commit_before>import yaml
from jaraco.collections import ItemsAsAttributes
class ConfigDict(ItemsAsAttributes, dict):
@classmethod
def from_yaml(cls, filename):
with open(filename) as f:
return cls(yaml.load(f))
def to_yaml(self, filename):
dumper = getattr(yaml, 'danger_dump', yaml.dump)
with open(filename, 'w') as f:
dumper(self, f)
<commit_msg>Use safe-load when loading configuration to avoid deprecation warning<commit_after> | import yaml
from jaraco.collections import ItemsAsAttributes
class ConfigDict(ItemsAsAttributes, dict):
@classmethod
def from_yaml(cls, filename):
with open(filename) as f:
return cls(yaml.safe_load(f))
def to_yaml(self, filename):
dumper = getattr(yaml, 'danger_dump', yaml.dump)
with open(filename, 'w') as f:
dumper(self, f)
| import yaml
from jaraco.collections import ItemsAsAttributes
class ConfigDict(ItemsAsAttributes, dict):
@classmethod
def from_yaml(cls, filename):
with open(filename) as f:
return cls(yaml.load(f))
def to_yaml(self, filename):
dumper = getattr(yaml, 'danger_dump', yaml.dump)
with open(filename, 'w') as f:
dumper(self, f)
Use safe-load when loading configuration to avoid deprecation warningimport yaml
from jaraco.collections import ItemsAsAttributes
class ConfigDict(ItemsAsAttributes, dict):
@classmethod
def from_yaml(cls, filename):
with open(filename) as f:
return cls(yaml.safe_load(f))
def to_yaml(self, filename):
dumper = getattr(yaml, 'danger_dump', yaml.dump)
with open(filename, 'w') as f:
dumper(self, f)
| <commit_before>import yaml
from jaraco.collections import ItemsAsAttributes
class ConfigDict(ItemsAsAttributes, dict):
@classmethod
def from_yaml(cls, filename):
with open(filename) as f:
return cls(yaml.load(f))
def to_yaml(self, filename):
dumper = getattr(yaml, 'danger_dump', yaml.dump)
with open(filename, 'w') as f:
dumper(self, f)
<commit_msg>Use safe-load when loading configuration to avoid deprecation warning<commit_after>import yaml
from jaraco.collections import ItemsAsAttributes
class ConfigDict(ItemsAsAttributes, dict):
@classmethod
def from_yaml(cls, filename):
with open(filename) as f:
return cls(yaml.safe_load(f))
def to_yaml(self, filename):
dumper = getattr(yaml, 'danger_dump', yaml.dump)
with open(filename, 'w') as f:
dumper(self, f)
|
c08013dc2fc32582e8636d84be3e2f68dafe11a0 | controller.py | controller.py | """NM-Controller accounts are used to provide secure access to the XMLRPC API. They are normal Unix accounts with a shell that tunnels XMLRPC requests to the API server."""
import accounts
import logger
import tools
class Controller(accounts.Account):
SHELL = '/usr/bin/forward_api_calls' # tunneling shell
TYPE = 'controller'
@staticmethod
def create(name, vref = None):
add_shell(Delegate.SHELL)
logger.log_call('/usr/sbin/useradd', '-p', '*', '-s', Delegate.SHELL, name)
@staticmethod
def destroy(name): logger.log_call('/usr/sbin/userdel', '-r', name)
def add_shell(shell):
"""Add <shell> to /etc/shells if it's not already there."""
etc_shells = open('/etc/shells')
valid_shells = etc_shells.read().split()
etc_shells.close()
if shell not in valid_shells:
etc_shells = open('/etc/shells', 'a')
print >>etc_shells, shell
etc_shells.close()
| """NM-Controller accounts are used to provide secure access to the XMLRPC API. They are normal Unix accounts with a shell that tunnels XMLRPC requests to the API server."""
import accounts
import logger
import tools
class Controller(accounts.Account):
SHELL = '/usr/bin/forward_api_calls' # tunneling shell
TYPE = 'controller'
@staticmethod
def create(name, vref = None):
add_shell(Controller.SHELL)
logger.log_call('/usr/sbin/useradd', '-p', '*', '-s', Controller.SHELL, name)
@staticmethod
def destroy(name): logger.log_call('/usr/sbin/userdel', '-r', name)
def add_shell(shell):
"""Add <shell> to /etc/shells if it's not already there."""
etc_shells = open('/etc/shells')
valid_shells = etc_shells.read().split()
etc_shells.close()
if shell not in valid_shells:
etc_shells = open('/etc/shells', 'a')
print >>etc_shells, shell
etc_shells.close()
| Change to Controller from Delegate shell | Change to Controller from Delegate shell
| Python | bsd-3-clause | dreibh/planetlab-lxc-nodemanager,planetlab/NodeManager,planetlab/NodeManager,planetlab/NodeManager,dreibh/planetlab-lxc-nodemanager,planetlab/NodeManager,dreibh/planetlab-lxc-nodemanager | """NM-Controller accounts are used to provide secure access to the XMLRPC API. They are normal Unix accounts with a shell that tunnels XMLRPC requests to the API server."""
import accounts
import logger
import tools
class Controller(accounts.Account):
SHELL = '/usr/bin/forward_api_calls' # tunneling shell
TYPE = 'controller'
@staticmethod
def create(name, vref = None):
add_shell(Delegate.SHELL)
logger.log_call('/usr/sbin/useradd', '-p', '*', '-s', Delegate.SHELL, name)
@staticmethod
def destroy(name): logger.log_call('/usr/sbin/userdel', '-r', name)
def add_shell(shell):
"""Add <shell> to /etc/shells if it's not already there."""
etc_shells = open('/etc/shells')
valid_shells = etc_shells.read().split()
etc_shells.close()
if shell not in valid_shells:
etc_shells = open('/etc/shells', 'a')
print >>etc_shells, shell
etc_shells.close()
Change to Controller from Delegate shell | """NM-Controller accounts are used to provide secure access to the XMLRPC API. They are normal Unix accounts with a shell that tunnels XMLRPC requests to the API server."""
import accounts
import logger
import tools
class Controller(accounts.Account):
SHELL = '/usr/bin/forward_api_calls' # tunneling shell
TYPE = 'controller'
@staticmethod
def create(name, vref = None):
add_shell(Controller.SHELL)
logger.log_call('/usr/sbin/useradd', '-p', '*', '-s', Controller.SHELL, name)
@staticmethod
def destroy(name): logger.log_call('/usr/sbin/userdel', '-r', name)
def add_shell(shell):
"""Add <shell> to /etc/shells if it's not already there."""
etc_shells = open('/etc/shells')
valid_shells = etc_shells.read().split()
etc_shells.close()
if shell not in valid_shells:
etc_shells = open('/etc/shells', 'a')
print >>etc_shells, shell
etc_shells.close()
| <commit_before>"""NM-Controller accounts are used to provide secure access to the XMLRPC API. They are normal Unix accounts with a shell that tunnels XMLRPC requests to the API server."""
import accounts
import logger
import tools
class Controller(accounts.Account):
SHELL = '/usr/bin/forward_api_calls' # tunneling shell
TYPE = 'controller'
@staticmethod
def create(name, vref = None):
add_shell(Delegate.SHELL)
logger.log_call('/usr/sbin/useradd', '-p', '*', '-s', Delegate.SHELL, name)
@staticmethod
def destroy(name): logger.log_call('/usr/sbin/userdel', '-r', name)
def add_shell(shell):
"""Add <shell> to /etc/shells if it's not already there."""
etc_shells = open('/etc/shells')
valid_shells = etc_shells.read().split()
etc_shells.close()
if shell not in valid_shells:
etc_shells = open('/etc/shells', 'a')
print >>etc_shells, shell
etc_shells.close()
<commit_msg>Change to Controller from Delegate shell<commit_after> | """NM-Controller accounts are used to provide secure access to the XMLRPC API. They are normal Unix accounts with a shell that tunnels XMLRPC requests to the API server."""
import accounts
import logger
import tools
class Controller(accounts.Account):
SHELL = '/usr/bin/forward_api_calls' # tunneling shell
TYPE = 'controller'
@staticmethod
def create(name, vref = None):
add_shell(Controller.SHELL)
logger.log_call('/usr/sbin/useradd', '-p', '*', '-s', Controller.SHELL, name)
@staticmethod
def destroy(name): logger.log_call('/usr/sbin/userdel', '-r', name)
def add_shell(shell):
"""Add <shell> to /etc/shells if it's not already there."""
etc_shells = open('/etc/shells')
valid_shells = etc_shells.read().split()
etc_shells.close()
if shell not in valid_shells:
etc_shells = open('/etc/shells', 'a')
print >>etc_shells, shell
etc_shells.close()
| """NM-Controller accounts are used to provide secure access to the XMLRPC API. They are normal Unix accounts with a shell that tunnels XMLRPC requests to the API server."""
import accounts
import logger
import tools
class Controller(accounts.Account):
SHELL = '/usr/bin/forward_api_calls' # tunneling shell
TYPE = 'controller'
@staticmethod
def create(name, vref = None):
add_shell(Delegate.SHELL)
logger.log_call('/usr/sbin/useradd', '-p', '*', '-s', Delegate.SHELL, name)
@staticmethod
def destroy(name): logger.log_call('/usr/sbin/userdel', '-r', name)
def add_shell(shell):
"""Add <shell> to /etc/shells if it's not already there."""
etc_shells = open('/etc/shells')
valid_shells = etc_shells.read().split()
etc_shells.close()
if shell not in valid_shells:
etc_shells = open('/etc/shells', 'a')
print >>etc_shells, shell
etc_shells.close()
Change to Controller from Delegate shell"""NM-Controller accounts are used to provide secure access to the XMLRPC API. They are normal Unix accounts with a shell that tunnels XMLRPC requests to the API server."""
import accounts
import logger
import tools
class Controller(accounts.Account):
SHELL = '/usr/bin/forward_api_calls' # tunneling shell
TYPE = 'controller'
@staticmethod
def create(name, vref = None):
add_shell(Controller.SHELL)
logger.log_call('/usr/sbin/useradd', '-p', '*', '-s', Controller.SHELL, name)
@staticmethod
def destroy(name): logger.log_call('/usr/sbin/userdel', '-r', name)
def add_shell(shell):
"""Add <shell> to /etc/shells if it's not already there."""
etc_shells = open('/etc/shells')
valid_shells = etc_shells.read().split()
etc_shells.close()
if shell not in valid_shells:
etc_shells = open('/etc/shells', 'a')
print >>etc_shells, shell
etc_shells.close()
| <commit_before>"""NM-Controller accounts are used to provide secure access to the XMLRPC API. They are normal Unix accounts with a shell that tunnels XMLRPC requests to the API server."""
import accounts
import logger
import tools
class Controller(accounts.Account):
SHELL = '/usr/bin/forward_api_calls' # tunneling shell
TYPE = 'controller'
@staticmethod
def create(name, vref = None):
add_shell(Delegate.SHELL)
logger.log_call('/usr/sbin/useradd', '-p', '*', '-s', Delegate.SHELL, name)
@staticmethod
def destroy(name): logger.log_call('/usr/sbin/userdel', '-r', name)
def add_shell(shell):
"""Add <shell> to /etc/shells if it's not already there."""
etc_shells = open('/etc/shells')
valid_shells = etc_shells.read().split()
etc_shells.close()
if shell not in valid_shells:
etc_shells = open('/etc/shells', 'a')
print >>etc_shells, shell
etc_shells.close()
<commit_msg>Change to Controller from Delegate shell<commit_after>"""NM-Controller accounts are used to provide secure access to the XMLRPC API. They are normal Unix accounts with a shell that tunnels XMLRPC requests to the API server."""
import accounts
import logger
import tools
class Controller(accounts.Account):
SHELL = '/usr/bin/forward_api_calls' # tunneling shell
TYPE = 'controller'
@staticmethod
def create(name, vref = None):
add_shell(Controller.SHELL)
logger.log_call('/usr/sbin/useradd', '-p', '*', '-s', Controller.SHELL, name)
@staticmethod
def destroy(name): logger.log_call('/usr/sbin/userdel', '-r', name)
def add_shell(shell):
"""Add <shell> to /etc/shells if it's not already there."""
etc_shells = open('/etc/shells')
valid_shells = etc_shells.read().split()
etc_shells.close()
if shell not in valid_shells:
etc_shells = open('/etc/shells', 'a')
print >>etc_shells, shell
etc_shells.close()
|
21f27eecae407e3baa8f55effd7b19ef2fa9ca6d | jug.py | jug.py | from store import create_directories
import options
import task
def parse_jugfile():
import jugfile
def print_tasks():
for i,t in enumerate(task.alltasks):
print 'Task %s: %s' % (i,t.name)
def execute_tasks():
tasks = task.alltasks
while tasks:
ready = [t for t in tasks if t.can_run()]
if len(ready) == 0:
print 'No tasks can be run!'
return
for t in ready:
if t.can_load():
t.load()
else:
t.run()
tasks.remove(t)
def init():
create_directories(options.datadir + '/tempfiles')
def main():
init()
parse_jugfile()
print_tasks()
execute_tasks()
print_tasks()
if __name__ == '__main__':
main()
| from collections import defaultdict
from store import create_directories
import options
import task
import jugfile
def print_tasks():
task_counts = defaultdict(int)
for t in task.alltasks:
task_counts[t.name] += 1
for tnc in task_counts.items():
print 'Task %s: %s' % tnc
task_names = set(t.name for t in task.alltasks)
tasks_executed = defaultdict(int)
tasks_loaded = defaultdict(int)
def execute_tasks():
tasks = task.alltasks
while tasks:
ready = [t for t in tasks if t.can_run()]
if len(ready) == 0:
print 'No tasks can be run!'
return
for t in ready:
if t.can_load():
t.load()
tasks_loaded[t.name] += 1
else:
t.run()
tasks_executed[t.name] += 1
tasks.remove(t)
def print_fstats():
print '%-20s%12s%12s' %('Task name','Executed','Loaded')
print ('-' * (20+12+12))
for t in task_names:
print '%-20s%12s%12s' % (t,tasks_executed[t],tasks_loaded[t])
def init():
create_directories(options.datadir + '/tempfiles')
def main():
init()
execute_tasks()
print_fstats()
if __name__ == '__main__':
main()
| Print a little summary when you're done | Print a little summary when you're done
| Python | mit | unode/jug,unode/jug,luispedro/jug,luispedro/jug | from store import create_directories
import options
import task
def parse_jugfile():
import jugfile
def print_tasks():
for i,t in enumerate(task.alltasks):
print 'Task %s: %s' % (i,t.name)
def execute_tasks():
tasks = task.alltasks
while tasks:
ready = [t for t in tasks if t.can_run()]
if len(ready) == 0:
print 'No tasks can be run!'
return
for t in ready:
if t.can_load():
t.load()
else:
t.run()
tasks.remove(t)
def init():
create_directories(options.datadir + '/tempfiles')
def main():
init()
parse_jugfile()
print_tasks()
execute_tasks()
print_tasks()
if __name__ == '__main__':
main()
Print a little summary when you're done | from collections import defaultdict
from store import create_directories
import options
import task
import jugfile
def print_tasks():
task_counts = defaultdict(int)
for t in task.alltasks:
task_counts[t.name] += 1
for tnc in task_counts.items():
print 'Task %s: %s' % tnc
task_names = set(t.name for t in task.alltasks)
tasks_executed = defaultdict(int)
tasks_loaded = defaultdict(int)
def execute_tasks():
tasks = task.alltasks
while tasks:
ready = [t for t in tasks if t.can_run()]
if len(ready) == 0:
print 'No tasks can be run!'
return
for t in ready:
if t.can_load():
t.load()
tasks_loaded[t.name] += 1
else:
t.run()
tasks_executed[t.name] += 1
tasks.remove(t)
def print_fstats():
print '%-20s%12s%12s' %('Task name','Executed','Loaded')
print ('-' * (20+12+12))
for t in task_names:
print '%-20s%12s%12s' % (t,tasks_executed[t],tasks_loaded[t])
def init():
create_directories(options.datadir + '/tempfiles')
def main():
init()
execute_tasks()
print_fstats()
if __name__ == '__main__':
main()
| <commit_before>from store import create_directories
import options
import task
def parse_jugfile():
import jugfile
def print_tasks():
for i,t in enumerate(task.alltasks):
print 'Task %s: %s' % (i,t.name)
def execute_tasks():
tasks = task.alltasks
while tasks:
ready = [t for t in tasks if t.can_run()]
if len(ready) == 0:
print 'No tasks can be run!'
return
for t in ready:
if t.can_load():
t.load()
else:
t.run()
tasks.remove(t)
def init():
create_directories(options.datadir + '/tempfiles')
def main():
init()
parse_jugfile()
print_tasks()
execute_tasks()
print_tasks()
if __name__ == '__main__':
main()
<commit_msg>Print a little summary when you're done<commit_after> | from collections import defaultdict
from store import create_directories
import options
import task
import jugfile
def print_tasks():
task_counts = defaultdict(int)
for t in task.alltasks:
task_counts[t.name] += 1
for tnc in task_counts.items():
print 'Task %s: %s' % tnc
task_names = set(t.name for t in task.alltasks)
tasks_executed = defaultdict(int)
tasks_loaded = defaultdict(int)
def execute_tasks():
tasks = task.alltasks
while tasks:
ready = [t for t in tasks if t.can_run()]
if len(ready) == 0:
print 'No tasks can be run!'
return
for t in ready:
if t.can_load():
t.load()
tasks_loaded[t.name] += 1
else:
t.run()
tasks_executed[t.name] += 1
tasks.remove(t)
def print_fstats():
print '%-20s%12s%12s' %('Task name','Executed','Loaded')
print ('-' * (20+12+12))
for t in task_names:
print '%-20s%12s%12s' % (t,tasks_executed[t],tasks_loaded[t])
def init():
create_directories(options.datadir + '/tempfiles')
def main():
init()
execute_tasks()
print_fstats()
if __name__ == '__main__':
main()
| from store import create_directories
import options
import task
def parse_jugfile():
import jugfile
def print_tasks():
for i,t in enumerate(task.alltasks):
print 'Task %s: %s' % (i,t.name)
def execute_tasks():
tasks = task.alltasks
while tasks:
ready = [t for t in tasks if t.can_run()]
if len(ready) == 0:
print 'No tasks can be run!'
return
for t in ready:
if t.can_load():
t.load()
else:
t.run()
tasks.remove(t)
def init():
create_directories(options.datadir + '/tempfiles')
def main():
init()
parse_jugfile()
print_tasks()
execute_tasks()
print_tasks()
if __name__ == '__main__':
main()
Print a little summary when you're donefrom collections import defaultdict
from store import create_directories
import options
import task
import jugfile
def print_tasks():
task_counts = defaultdict(int)
for t in task.alltasks:
task_counts[t.name] += 1
for tnc in task_counts.items():
print 'Task %s: %s' % tnc
task_names = set(t.name for t in task.alltasks)
tasks_executed = defaultdict(int)
tasks_loaded = defaultdict(int)
def execute_tasks():
tasks = task.alltasks
while tasks:
ready = [t for t in tasks if t.can_run()]
if len(ready) == 0:
print 'No tasks can be run!'
return
for t in ready:
if t.can_load():
t.load()
tasks_loaded[t.name] += 1
else:
t.run()
tasks_executed[t.name] += 1
tasks.remove(t)
def print_fstats():
print '%-20s%12s%12s' %('Task name','Executed','Loaded')
print ('-' * (20+12+12))
for t in task_names:
print '%-20s%12s%12s' % (t,tasks_executed[t],tasks_loaded[t])
def init():
create_directories(options.datadir + '/tempfiles')
def main():
init()
execute_tasks()
print_fstats()
if __name__ == '__main__':
main()
| <commit_before>from store import create_directories
import options
import task
def parse_jugfile():
import jugfile
def print_tasks():
for i,t in enumerate(task.alltasks):
print 'Task %s: %s' % (i,t.name)
def execute_tasks():
tasks = task.alltasks
while tasks:
ready = [t for t in tasks if t.can_run()]
if len(ready) == 0:
print 'No tasks can be run!'
return
for t in ready:
if t.can_load():
t.load()
else:
t.run()
tasks.remove(t)
def init():
create_directories(options.datadir + '/tempfiles')
def main():
init()
parse_jugfile()
print_tasks()
execute_tasks()
print_tasks()
if __name__ == '__main__':
main()
<commit_msg>Print a little summary when you're done<commit_after>from collections import defaultdict
from store import create_directories
import options
import task
import jugfile
def print_tasks():
task_counts = defaultdict(int)
for t in task.alltasks:
task_counts[t.name] += 1
for tnc in task_counts.items():
print 'Task %s: %s' % tnc
task_names = set(t.name for t in task.alltasks)
tasks_executed = defaultdict(int)
tasks_loaded = defaultdict(int)
def execute_tasks():
tasks = task.alltasks
while tasks:
ready = [t for t in tasks if t.can_run()]
if len(ready) == 0:
print 'No tasks can be run!'
return
for t in ready:
if t.can_load():
t.load()
tasks_loaded[t.name] += 1
else:
t.run()
tasks_executed[t.name] += 1
tasks.remove(t)
def print_fstats():
print '%-20s%12s%12s' %('Task name','Executed','Loaded')
print ('-' * (20+12+12))
for t in task_names:
print '%-20s%12s%12s' % (t,tasks_executed[t],tasks_loaded[t])
def init():
create_directories(options.datadir + '/tempfiles')
def main():
init()
execute_tasks()
print_fstats()
if __name__ == '__main__':
main()
|
d2730af02bf9032ec5bb10b07f8fae6fab2bba52 | fellowms/management/commands/applicants_data.py | fellowms/management/commands/applicants_data.py | import pandas as pd
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ObjectDoesNotExist
from fellowms.models import Fellow
class Command(BaseCommand):
help = "Add old information to database."
# TODO Make use of args and options.
def handle(self, *args, **options):
data = pd.read_csv('all_applications_details.csv')
for idx, line in data.iterrows():
if line['Selected']=='Yes':
is_fellow=True
else:
is_fellow=False
applicants_dict = {
"application_year": line["Inauguration year"],
"selected": is_fellow,
"forenames": line["Forename(s)"],
"surname": line["Surname"],
"affiliation": line["Home institution"],
"research_area": line["Research area"],
"research_area_code": line["Research classification"],
"email": line["E-mail"],
"phone": line["Telephone"],
"gender": line["Gender"] if line["Gender"] else 'R',
"work_description": line["Work area"],
"funding": "{}, {}".format(line["Primary funder"],line["Additional funder"]),
}
applicant = Fellow(**applicants_dict)
applicant.save()
| import pandas as pd
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ObjectDoesNotExist
from fellowms.models import Fellow
class Command(BaseCommand):
help = "Add old information to database."
# TODO Make use of args and options.
def handle(self, *args, **options):
data = pd.read_csv('all_applications_details.csv')
for idx, line in data.iterrows():
if line['Selected']=='Yes':
is_fellow=True
else:
is_fellow=False
applicants_dict = {
"application_year": line["Inauguration year"],
"selected": is_fellow,
"forenames": line["Forename(s)"],
"surname": line["Surname"],
"affiliation": line["Home institution"],
"research_area": line["Research area"],
"research_area_code": line["Research classification"],
"email": line["E-mail"],
"phone": line["Telephone"],
"gender": line["Gender"] if line["Gender"] else 'R',
"work_description": line["Work area"],
"funding": "{}, {}".format(line["Primary funder"],line["Additional funder"]),
"fellowship_grant": 3000 if is_fellow else 0,
}
applicant = Fellow(**applicants_dict)
applicant.save()
| Fix fellowship_grant when upload data | Fix fellowship_grant when upload data
| Python | bsd-3-clause | softwaresaved/fat,softwaresaved/fat,softwaresaved/fat,softwaresaved/fat | import pandas as pd
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ObjectDoesNotExist
from fellowms.models import Fellow
class Command(BaseCommand):
help = "Add old information to database."
# TODO Make use of args and options.
def handle(self, *args, **options):
data = pd.read_csv('all_applications_details.csv')
for idx, line in data.iterrows():
if line['Selected']=='Yes':
is_fellow=True
else:
is_fellow=False
applicants_dict = {
"application_year": line["Inauguration year"],
"selected": is_fellow,
"forenames": line["Forename(s)"],
"surname": line["Surname"],
"affiliation": line["Home institution"],
"research_area": line["Research area"],
"research_area_code": line["Research classification"],
"email": line["E-mail"],
"phone": line["Telephone"],
"gender": line["Gender"] if line["Gender"] else 'R',
"work_description": line["Work area"],
"funding": "{}, {}".format(line["Primary funder"],line["Additional funder"]),
}
applicant = Fellow(**applicants_dict)
applicant.save()
Fix fellowship_grant when upload data | import pandas as pd
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ObjectDoesNotExist
from fellowms.models import Fellow
class Command(BaseCommand):
help = "Add old information to database."
# TODO Make use of args and options.
def handle(self, *args, **options):
data = pd.read_csv('all_applications_details.csv')
for idx, line in data.iterrows():
if line['Selected']=='Yes':
is_fellow=True
else:
is_fellow=False
applicants_dict = {
"application_year": line["Inauguration year"],
"selected": is_fellow,
"forenames": line["Forename(s)"],
"surname": line["Surname"],
"affiliation": line["Home institution"],
"research_area": line["Research area"],
"research_area_code": line["Research classification"],
"email": line["E-mail"],
"phone": line["Telephone"],
"gender": line["Gender"] if line["Gender"] else 'R',
"work_description": line["Work area"],
"funding": "{}, {}".format(line["Primary funder"],line["Additional funder"]),
"fellowship_grant": 3000 if is_fellow else 0,
}
applicant = Fellow(**applicants_dict)
applicant.save()
| <commit_before>import pandas as pd
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ObjectDoesNotExist
from fellowms.models import Fellow
class Command(BaseCommand):
help = "Add old information to database."
# TODO Make use of args and options.
def handle(self, *args, **options):
data = pd.read_csv('all_applications_details.csv')
for idx, line in data.iterrows():
if line['Selected']=='Yes':
is_fellow=True
else:
is_fellow=False
applicants_dict = {
"application_year": line["Inauguration year"],
"selected": is_fellow,
"forenames": line["Forename(s)"],
"surname": line["Surname"],
"affiliation": line["Home institution"],
"research_area": line["Research area"],
"research_area_code": line["Research classification"],
"email": line["E-mail"],
"phone": line["Telephone"],
"gender": line["Gender"] if line["Gender"] else 'R',
"work_description": line["Work area"],
"funding": "{}, {}".format(line["Primary funder"],line["Additional funder"]),
}
applicant = Fellow(**applicants_dict)
applicant.save()
<commit_msg>Fix fellowship_grant when upload data<commit_after> | import pandas as pd
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ObjectDoesNotExist
from fellowms.models import Fellow
class Command(BaseCommand):
help = "Add old information to database."
# TODO Make use of args and options.
def handle(self, *args, **options):
data = pd.read_csv('all_applications_details.csv')
for idx, line in data.iterrows():
if line['Selected']=='Yes':
is_fellow=True
else:
is_fellow=False
applicants_dict = {
"application_year": line["Inauguration year"],
"selected": is_fellow,
"forenames": line["Forename(s)"],
"surname": line["Surname"],
"affiliation": line["Home institution"],
"research_area": line["Research area"],
"research_area_code": line["Research classification"],
"email": line["E-mail"],
"phone": line["Telephone"],
"gender": line["Gender"] if line["Gender"] else 'R',
"work_description": line["Work area"],
"funding": "{}, {}".format(line["Primary funder"],line["Additional funder"]),
"fellowship_grant": 3000 if is_fellow else 0,
}
applicant = Fellow(**applicants_dict)
applicant.save()
| import pandas as pd
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ObjectDoesNotExist
from fellowms.models import Fellow
class Command(BaseCommand):
help = "Add old information to database."
# TODO Make use of args and options.
def handle(self, *args, **options):
data = pd.read_csv('all_applications_details.csv')
for idx, line in data.iterrows():
if line['Selected']=='Yes':
is_fellow=True
else:
is_fellow=False
applicants_dict = {
"application_year": line["Inauguration year"],
"selected": is_fellow,
"forenames": line["Forename(s)"],
"surname": line["Surname"],
"affiliation": line["Home institution"],
"research_area": line["Research area"],
"research_area_code": line["Research classification"],
"email": line["E-mail"],
"phone": line["Telephone"],
"gender": line["Gender"] if line["Gender"] else 'R',
"work_description": line["Work area"],
"funding": "{}, {}".format(line["Primary funder"],line["Additional funder"]),
}
applicant = Fellow(**applicants_dict)
applicant.save()
Fix fellowship_grant when upload dataimport pandas as pd
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ObjectDoesNotExist
from fellowms.models import Fellow
class Command(BaseCommand):
help = "Add old information to database."
# TODO Make use of args and options.
def handle(self, *args, **options):
data = pd.read_csv('all_applications_details.csv')
for idx, line in data.iterrows():
if line['Selected']=='Yes':
is_fellow=True
else:
is_fellow=False
applicants_dict = {
"application_year": line["Inauguration year"],
"selected": is_fellow,
"forenames": line["Forename(s)"],
"surname": line["Surname"],
"affiliation": line["Home institution"],
"research_area": line["Research area"],
"research_area_code": line["Research classification"],
"email": line["E-mail"],
"phone": line["Telephone"],
"gender": line["Gender"] if line["Gender"] else 'R',
"work_description": line["Work area"],
"funding": "{}, {}".format(line["Primary funder"],line["Additional funder"]),
"fellowship_grant": 3000 if is_fellow else 0,
}
applicant = Fellow(**applicants_dict)
applicant.save()
| <commit_before>import pandas as pd
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ObjectDoesNotExist
from fellowms.models import Fellow
class Command(BaseCommand):
help = "Add old information to database."
# TODO Make use of args and options.
def handle(self, *args, **options):
data = pd.read_csv('all_applications_details.csv')
for idx, line in data.iterrows():
if line['Selected']=='Yes':
is_fellow=True
else:
is_fellow=False
applicants_dict = {
"application_year": line["Inauguration year"],
"selected": is_fellow,
"forenames": line["Forename(s)"],
"surname": line["Surname"],
"affiliation": line["Home institution"],
"research_area": line["Research area"],
"research_area_code": line["Research classification"],
"email": line["E-mail"],
"phone": line["Telephone"],
"gender": line["Gender"] if line["Gender"] else 'R',
"work_description": line["Work area"],
"funding": "{}, {}".format(line["Primary funder"],line["Additional funder"]),
}
applicant = Fellow(**applicants_dict)
applicant.save()
<commit_msg>Fix fellowship_grant when upload data<commit_after>import pandas as pd
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ObjectDoesNotExist
from fellowms.models import Fellow
class Command(BaseCommand):
help = "Add old information to database."
# TODO Make use of args and options.
def handle(self, *args, **options):
data = pd.read_csv('all_applications_details.csv')
for idx, line in data.iterrows():
if line['Selected']=='Yes':
is_fellow=True
else:
is_fellow=False
applicants_dict = {
"application_year": line["Inauguration year"],
"selected": is_fellow,
"forenames": line["Forename(s)"],
"surname": line["Surname"],
"affiliation": line["Home institution"],
"research_area": line["Research area"],
"research_area_code": line["Research classification"],
"email": line["E-mail"],
"phone": line["Telephone"],
"gender": line["Gender"] if line["Gender"] else 'R',
"work_description": line["Work area"],
"funding": "{}, {}".format(line["Primary funder"],line["Additional funder"]),
"fellowship_grant": 3000 if is_fellow else 0,
}
applicant = Fellow(**applicants_dict)
applicant.save()
|
9f9d78fd7a5011f24b5e12249f1276010293d877 | bin/upload_version.py | bin/upload_version.py | #!python
import os
import sys
import json
import requests
if __name__ == '__main__':
version = sys.argv[1]
filepath = sys.argv[2]
filename = filepath.split('/')[-1]
github_token = os.environ['GITHUB_TOKEN']
auth = (github_token, 'x-oauth-basic')
commit_sha = os.environ['CIRCLE_SHA1']
params = json.dumps({
'tag_name': 'v{0}'.format(version),
'name': 're:dash v{0}'.format(version),
'target_commitish': commit_sha,
'prerelease': True
})
response = requests.post('https://api.github.com/repos/everythingme/redash/releases',
data=params,
auth=auth)
upload_url = response.json()['upload_url']
upload_url = upload_url.replace('{?name}', '')
with open(filepath) as file_content:
headers = {'Content-Type': 'application/gzip'}
response = requests.post(upload_url, file_content, params={'name': filename}, auth=auth, headers=headers, verify=False)
| #!python
import os
import sys
import json
import requests
import subprocess
def capture_output(command):
proc = subprocess.Popen(command, stdout=subprocess.PIPE)
return proc.stdout.read()
if __name__ == '__main__':
version = sys.argv[1]
filepath = sys.argv[2]
filename = filepath.split('/')[-1]
github_token = os.environ['GITHUB_TOKEN']
auth = (github_token, 'x-oauth-basic')
commit_sha = os.environ['CIRCLE_SHA1']
commit_body = capture_output(["git", "log", "--format=%b", "-n", "1", commit_sha])
file_md5_checksum = capture_output(["md5sum", filename]).split()[0]
file_sha256_checksum = capture_output(["sha256sum", filename]).split()[0]
version_body = "%s\n\nMD5: %s\nSHA256: %s" % (commit_body, file_md5_checksum, file_sha256_checksum)
params = json.dumps({
'tag_name': 'v{0}'.format(version),
'name': 're:dash v{0}'.format(version),
'body': version_body,
'target_commitish': commit_sha,
'prerelease': True
})
response = requests.post('https://api.github.com/repos/everythingme/redash/releases',
data=params,
auth=auth)
upload_url = response.json()['upload_url']
upload_url = upload_url.replace('{?name}', '')
with open(filepath) as file_content:
headers = {'Content-Type': 'application/gzip'}
response = requests.post(upload_url, file_content, params={'name': filename}, auth=auth,
headers=headers, verify=False)
| Update upload script to include checksums | Update upload script to include checksums
| Python | bsd-2-clause | useabode/redash,pubnative/redash,rockwotj/redash,chriszs/redash,getredash/redash,44px/redash,hudl/redash,moritz9/redash,imsally/redash,getredash/redash,alexanderlz/redash,akariv/redash,rockwotj/redash,alexanderlz/redash,44px/redash,guaguadev/redash,vishesh92/redash,imsally/redash,pubnative/redash,easytaxibr/redash,EverlyWell/redash,M32Media/redash,easytaxibr/redash,akariv/redash,44px/redash,guaguadev/redash,amino-data/redash,vishesh92/redash,ninneko/redash,guaguadev/redash,stefanseifert/redash,hudl/redash,pubnative/redash,useabode/redash,moritz9/redash,amino-data/redash,M32Media/redash,getredash/redash,guaguadev/redash,jmvasquez/redashtest,M32Media/redash,44px/redash,chriszs/redash,hudl/redash,alexanderlz/redash,akariv/redash,useabode/redash,rockwotj/redash,stefanseifert/redash,ninneko/redash,useabode/redash,pubnative/redash,akariv/redash,easytaxibr/redash,ninneko/redash,ninneko/redash,pubnative/redash,stefanseifert/redash,vishesh92/redash,vishesh92/redash,crowdworks/redash,M32Media/redash,jmvasquez/redashtest,amino-data/redash,guaguadev/redash,chriszs/redash,crowdworks/redash,getredash/redash,EverlyWell/redash,denisov-vlad/redash,imsally/redash,EverlyWell/redash,crowdworks/redash,getredash/redash,rockwotj/redash,easytaxibr/redash,denisov-vlad/redash,stefanseifert/redash,denisov-vlad/redash,crowdworks/redash,ninneko/redash,jmvasquez/redashtest,EverlyWell/redash,jmvasquez/redashtest,hudl/redash,denisov-vlad/redash,imsally/redash,akariv/redash,jmvasquez/redashtest,alexanderlz/redash,denisov-vlad/redash,easytaxibr/redash,moritz9/redash,moritz9/redash,chriszs/redash,amino-data/redash,stefanseifert/redash | #!python
import os
import sys
import json
import requests
if __name__ == '__main__':
version = sys.argv[1]
filepath = sys.argv[2]
filename = filepath.split('/')[-1]
github_token = os.environ['GITHUB_TOKEN']
auth = (github_token, 'x-oauth-basic')
commit_sha = os.environ['CIRCLE_SHA1']
params = json.dumps({
'tag_name': 'v{0}'.format(version),
'name': 're:dash v{0}'.format(version),
'target_commitish': commit_sha,
'prerelease': True
})
response = requests.post('https://api.github.com/repos/everythingme/redash/releases',
data=params,
auth=auth)
upload_url = response.json()['upload_url']
upload_url = upload_url.replace('{?name}', '')
with open(filepath) as file_content:
headers = {'Content-Type': 'application/gzip'}
response = requests.post(upload_url, file_content, params={'name': filename}, auth=auth, headers=headers, verify=False)
Update upload script to include checksums | #!python
import os
import sys
import json
import requests
import subprocess
def capture_output(command):
proc = subprocess.Popen(command, stdout=subprocess.PIPE)
return proc.stdout.read()
if __name__ == '__main__':
version = sys.argv[1]
filepath = sys.argv[2]
filename = filepath.split('/')[-1]
github_token = os.environ['GITHUB_TOKEN']
auth = (github_token, 'x-oauth-basic')
commit_sha = os.environ['CIRCLE_SHA1']
commit_body = capture_output(["git", "log", "--format=%b", "-n", "1", commit_sha])
file_md5_checksum = capture_output(["md5sum", filename]).split()[0]
file_sha256_checksum = capture_output(["sha256sum", filename]).split()[0]
version_body = "%s\n\nMD5: %s\nSHA256: %s" % (commit_body, file_md5_checksum, file_sha256_checksum)
params = json.dumps({
'tag_name': 'v{0}'.format(version),
'name': 're:dash v{0}'.format(version),
'body': version_body,
'target_commitish': commit_sha,
'prerelease': True
})
response = requests.post('https://api.github.com/repos/everythingme/redash/releases',
data=params,
auth=auth)
upload_url = response.json()['upload_url']
upload_url = upload_url.replace('{?name}', '')
with open(filepath) as file_content:
headers = {'Content-Type': 'application/gzip'}
response = requests.post(upload_url, file_content, params={'name': filename}, auth=auth,
headers=headers, verify=False)
| <commit_before>#!python
import os
import sys
import json
import requests
if __name__ == '__main__':
version = sys.argv[1]
filepath = sys.argv[2]
filename = filepath.split('/')[-1]
github_token = os.environ['GITHUB_TOKEN']
auth = (github_token, 'x-oauth-basic')
commit_sha = os.environ['CIRCLE_SHA1']
params = json.dumps({
'tag_name': 'v{0}'.format(version),
'name': 're:dash v{0}'.format(version),
'target_commitish': commit_sha,
'prerelease': True
})
response = requests.post('https://api.github.com/repos/everythingme/redash/releases',
data=params,
auth=auth)
upload_url = response.json()['upload_url']
upload_url = upload_url.replace('{?name}', '')
with open(filepath) as file_content:
headers = {'Content-Type': 'application/gzip'}
response = requests.post(upload_url, file_content, params={'name': filename}, auth=auth, headers=headers, verify=False)
<commit_msg>Update upload script to include checksums<commit_after> | #!python
import os
import sys
import json
import requests
import subprocess
def capture_output(command):
proc = subprocess.Popen(command, stdout=subprocess.PIPE)
return proc.stdout.read()
if __name__ == '__main__':
version = sys.argv[1]
filepath = sys.argv[2]
filename = filepath.split('/')[-1]
github_token = os.environ['GITHUB_TOKEN']
auth = (github_token, 'x-oauth-basic')
commit_sha = os.environ['CIRCLE_SHA1']
commit_body = capture_output(["git", "log", "--format=%b", "-n", "1", commit_sha])
file_md5_checksum = capture_output(["md5sum", filename]).split()[0]
file_sha256_checksum = capture_output(["sha256sum", filename]).split()[0]
version_body = "%s\n\nMD5: %s\nSHA256: %s" % (commit_body, file_md5_checksum, file_sha256_checksum)
params = json.dumps({
'tag_name': 'v{0}'.format(version),
'name': 're:dash v{0}'.format(version),
'body': version_body,
'target_commitish': commit_sha,
'prerelease': True
})
response = requests.post('https://api.github.com/repos/everythingme/redash/releases',
data=params,
auth=auth)
upload_url = response.json()['upload_url']
upload_url = upload_url.replace('{?name}', '')
with open(filepath) as file_content:
headers = {'Content-Type': 'application/gzip'}
response = requests.post(upload_url, file_content, params={'name': filename}, auth=auth,
headers=headers, verify=False)
| #!python
import os
import sys
import json
import requests
if __name__ == '__main__':
version = sys.argv[1]
filepath = sys.argv[2]
filename = filepath.split('/')[-1]
github_token = os.environ['GITHUB_TOKEN']
auth = (github_token, 'x-oauth-basic')
commit_sha = os.environ['CIRCLE_SHA1']
params = json.dumps({
'tag_name': 'v{0}'.format(version),
'name': 're:dash v{0}'.format(version),
'target_commitish': commit_sha,
'prerelease': True
})
response = requests.post('https://api.github.com/repos/everythingme/redash/releases',
data=params,
auth=auth)
upload_url = response.json()['upload_url']
upload_url = upload_url.replace('{?name}', '')
with open(filepath) as file_content:
headers = {'Content-Type': 'application/gzip'}
response = requests.post(upload_url, file_content, params={'name': filename}, auth=auth, headers=headers, verify=False)
Update upload script to include checksums#!python
import os
import sys
import json
import requests
import subprocess
def capture_output(command):
proc = subprocess.Popen(command, stdout=subprocess.PIPE)
return proc.stdout.read()
if __name__ == '__main__':
version = sys.argv[1]
filepath = sys.argv[2]
filename = filepath.split('/')[-1]
github_token = os.environ['GITHUB_TOKEN']
auth = (github_token, 'x-oauth-basic')
commit_sha = os.environ['CIRCLE_SHA1']
commit_body = capture_output(["git", "log", "--format=%b", "-n", "1", commit_sha])
file_md5_checksum = capture_output(["md5sum", filename]).split()[0]
file_sha256_checksum = capture_output(["sha256sum", filename]).split()[0]
version_body = "%s\n\nMD5: %s\nSHA256: %s" % (commit_body, file_md5_checksum, file_sha256_checksum)
params = json.dumps({
'tag_name': 'v{0}'.format(version),
'name': 're:dash v{0}'.format(version),
'body': version_body,
'target_commitish': commit_sha,
'prerelease': True
})
response = requests.post('https://api.github.com/repos/everythingme/redash/releases',
data=params,
auth=auth)
upload_url = response.json()['upload_url']
upload_url = upload_url.replace('{?name}', '')
with open(filepath) as file_content:
headers = {'Content-Type': 'application/gzip'}
response = requests.post(upload_url, file_content, params={'name': filename}, auth=auth,
headers=headers, verify=False)
| <commit_before>#!python
import os
import sys
import json
import requests
if __name__ == '__main__':
version = sys.argv[1]
filepath = sys.argv[2]
filename = filepath.split('/')[-1]
github_token = os.environ['GITHUB_TOKEN']
auth = (github_token, 'x-oauth-basic')
commit_sha = os.environ['CIRCLE_SHA1']
params = json.dumps({
'tag_name': 'v{0}'.format(version),
'name': 're:dash v{0}'.format(version),
'target_commitish': commit_sha,
'prerelease': True
})
response = requests.post('https://api.github.com/repos/everythingme/redash/releases',
data=params,
auth=auth)
upload_url = response.json()['upload_url']
upload_url = upload_url.replace('{?name}', '')
with open(filepath) as file_content:
headers = {'Content-Type': 'application/gzip'}
response = requests.post(upload_url, file_content, params={'name': filename}, auth=auth, headers=headers, verify=False)
<commit_msg>Update upload script to include checksums<commit_after>#!python
import os
import sys
import json
import requests
import subprocess
def capture_output(command):
proc = subprocess.Popen(command, stdout=subprocess.PIPE)
return proc.stdout.read()
if __name__ == '__main__':
version = sys.argv[1]
filepath = sys.argv[2]
filename = filepath.split('/')[-1]
github_token = os.environ['GITHUB_TOKEN']
auth = (github_token, 'x-oauth-basic')
commit_sha = os.environ['CIRCLE_SHA1']
commit_body = capture_output(["git", "log", "--format=%b", "-n", "1", commit_sha])
file_md5_checksum = capture_output(["md5sum", filename]).split()[0]
file_sha256_checksum = capture_output(["sha256sum", filename]).split()[0]
version_body = "%s\n\nMD5: %s\nSHA256: %s" % (commit_body, file_md5_checksum, file_sha256_checksum)
params = json.dumps({
'tag_name': 'v{0}'.format(version),
'name': 're:dash v{0}'.format(version),
'body': version_body,
'target_commitish': commit_sha,
'prerelease': True
})
response = requests.post('https://api.github.com/repos/everythingme/redash/releases',
data=params,
auth=auth)
upload_url = response.json()['upload_url']
upload_url = upload_url.replace('{?name}', '')
with open(filepath) as file_content:
headers = {'Content-Type': 'application/gzip'}
response = requests.post(upload_url, file_content, params={'name': filename}, auth=auth,
headers=headers, verify=False)
|
b295e3e64367073550ceb00faa72e6564f08dd55 | pytest_cookies.py | pytest_cookies.py | # -*- coding: utf-8 -*-
import pytest
from cookiecutter.main import cookiecutter
class Cookies(object):
"""Class to provide convenient access to the cookiecutter API."""
error = None
project = None
def __init__(self, template, output_dir):
self._template = template
self._output_dir = output_dir
def bake(self, extra_context=None):
try:
project_dir = cookiecutter(
self._template,
no_input=True,
extra_context=extra_context,
output_dir=self._output_dir
)
except Exception as e:
self.error = e
else:
self.project = project_dir
@pytest.fixture
def cookies(request, tmpdir):
output_dir = request.config.option.output_dir
if not output_dir:
output_dir = str(tmpdir.mkdir('cookies_output'))
_cookies = Cookies('.', output_dir)
return _cookies
def pytest_addoption(parser):
group = parser.getgroup('cookies')
group.addoption(
'--output-dir',
action='store',
dest='output_dir',
help='Set the output directory for Cookiecutter'
)
parser.addini('HELLO', 'Dummy pytest.ini setting')
| # -*- coding: utf-8 -*-
import pytest
from cookiecutter.main import cookiecutter
class Cookies(object):
"""Class to provide convenient access to the cookiecutter API."""
exception = None
exit_code = 0
project = None
def __init__(self, template, output_dir):
self._template = template
self._output_dir = output_dir
def bake(self, extra_context=None):
try:
project_dir = cookiecutter(
self._template,
no_input=True,
extra_context=extra_context,
output_dir=self._output_dir
)
except SystemExit as e:
if e.code != 0:
self.exception = e
self.exit_code = e.code
except Exception as e:
self.exception = e
self.exit_code = -1
else:
self.project = project_dir
@pytest.fixture
def cookies(request, tmpdir):
output_dir = request.config.option.output_dir
if not output_dir:
output_dir = str(tmpdir.mkdir('cookies_output'))
_cookies = Cookies('.', output_dir)
return _cookies
def pytest_addoption(parser):
group = parser.getgroup('cookies')
group.addoption(
'--output-dir',
action='store',
dest='output_dir',
help='Set the output directory for Cookiecutter'
)
parser.addini('HELLO', 'Dummy pytest.ini setting')
| Handle SystemExit errors and add exit_code | Handle SystemExit errors and add exit_code
| Python | mit | hackebrot/pytest-cookies | # -*- coding: utf-8 -*-
import pytest
from cookiecutter.main import cookiecutter
class Cookies(object):
"""Class to provide convenient access to the cookiecutter API."""
error = None
project = None
def __init__(self, template, output_dir):
self._template = template
self._output_dir = output_dir
def bake(self, extra_context=None):
try:
project_dir = cookiecutter(
self._template,
no_input=True,
extra_context=extra_context,
output_dir=self._output_dir
)
except Exception as e:
self.error = e
else:
self.project = project_dir
@pytest.fixture
def cookies(request, tmpdir):
output_dir = request.config.option.output_dir
if not output_dir:
output_dir = str(tmpdir.mkdir('cookies_output'))
_cookies = Cookies('.', output_dir)
return _cookies
def pytest_addoption(parser):
group = parser.getgroup('cookies')
group.addoption(
'--output-dir',
action='store',
dest='output_dir',
help='Set the output directory for Cookiecutter'
)
parser.addini('HELLO', 'Dummy pytest.ini setting')
Handle SystemExit errors and add exit_code | # -*- coding: utf-8 -*-
import pytest
from cookiecutter.main import cookiecutter
class Cookies(object):
"""Class to provide convenient access to the cookiecutter API."""
exception = None
exit_code = 0
project = None
def __init__(self, template, output_dir):
self._template = template
self._output_dir = output_dir
def bake(self, extra_context=None):
try:
project_dir = cookiecutter(
self._template,
no_input=True,
extra_context=extra_context,
output_dir=self._output_dir
)
except SystemExit as e:
if e.code != 0:
self.exception = e
self.exit_code = e.code
except Exception as e:
self.exception = e
self.exit_code = -1
else:
self.project = project_dir
@pytest.fixture
def cookies(request, tmpdir):
output_dir = request.config.option.output_dir
if not output_dir:
output_dir = str(tmpdir.mkdir('cookies_output'))
_cookies = Cookies('.', output_dir)
return _cookies
def pytest_addoption(parser):
group = parser.getgroup('cookies')
group.addoption(
'--output-dir',
action='store',
dest='output_dir',
help='Set the output directory for Cookiecutter'
)
parser.addini('HELLO', 'Dummy pytest.ini setting')
| <commit_before># -*- coding: utf-8 -*-
import pytest
from cookiecutter.main import cookiecutter
class Cookies(object):
"""Class to provide convenient access to the cookiecutter API."""
error = None
project = None
def __init__(self, template, output_dir):
self._template = template
self._output_dir = output_dir
def bake(self, extra_context=None):
try:
project_dir = cookiecutter(
self._template,
no_input=True,
extra_context=extra_context,
output_dir=self._output_dir
)
except Exception as e:
self.error = e
else:
self.project = project_dir
@pytest.fixture
def cookies(request, tmpdir):
output_dir = request.config.option.output_dir
if not output_dir:
output_dir = str(tmpdir.mkdir('cookies_output'))
_cookies = Cookies('.', output_dir)
return _cookies
def pytest_addoption(parser):
group = parser.getgroup('cookies')
group.addoption(
'--output-dir',
action='store',
dest='output_dir',
help='Set the output directory for Cookiecutter'
)
parser.addini('HELLO', 'Dummy pytest.ini setting')
<commit_msg>Handle SystemExit errors and add exit_code<commit_after> | # -*- coding: utf-8 -*-
import pytest
from cookiecutter.main import cookiecutter
class Cookies(object):
"""Class to provide convenient access to the cookiecutter API."""
exception = None
exit_code = 0
project = None
def __init__(self, template, output_dir):
self._template = template
self._output_dir = output_dir
def bake(self, extra_context=None):
try:
project_dir = cookiecutter(
self._template,
no_input=True,
extra_context=extra_context,
output_dir=self._output_dir
)
except SystemExit as e:
if e.code != 0:
self.exception = e
self.exit_code = e.code
except Exception as e:
self.exception = e
self.exit_code = -1
else:
self.project = project_dir
@pytest.fixture
def cookies(request, tmpdir):
output_dir = request.config.option.output_dir
if not output_dir:
output_dir = str(tmpdir.mkdir('cookies_output'))
_cookies = Cookies('.', output_dir)
return _cookies
def pytest_addoption(parser):
group = parser.getgroup('cookies')
group.addoption(
'--output-dir',
action='store',
dest='output_dir',
help='Set the output directory for Cookiecutter'
)
parser.addini('HELLO', 'Dummy pytest.ini setting')
| # -*- coding: utf-8 -*-
import pytest
from cookiecutter.main import cookiecutter
class Cookies(object):
"""Class to provide convenient access to the cookiecutter API."""
error = None
project = None
def __init__(self, template, output_dir):
self._template = template
self._output_dir = output_dir
def bake(self, extra_context=None):
try:
project_dir = cookiecutter(
self._template,
no_input=True,
extra_context=extra_context,
output_dir=self._output_dir
)
except Exception as e:
self.error = e
else:
self.project = project_dir
@pytest.fixture
def cookies(request, tmpdir):
output_dir = request.config.option.output_dir
if not output_dir:
output_dir = str(tmpdir.mkdir('cookies_output'))
_cookies = Cookies('.', output_dir)
return _cookies
def pytest_addoption(parser):
group = parser.getgroup('cookies')
group.addoption(
'--output-dir',
action='store',
dest='output_dir',
help='Set the output directory for Cookiecutter'
)
parser.addini('HELLO', 'Dummy pytest.ini setting')
Handle SystemExit errors and add exit_code# -*- coding: utf-8 -*-
import pytest
from cookiecutter.main import cookiecutter
class Cookies(object):
"""Class to provide convenient access to the cookiecutter API."""
exception = None
exit_code = 0
project = None
def __init__(self, template, output_dir):
self._template = template
self._output_dir = output_dir
def bake(self, extra_context=None):
try:
project_dir = cookiecutter(
self._template,
no_input=True,
extra_context=extra_context,
output_dir=self._output_dir
)
except SystemExit as e:
if e.code != 0:
self.exception = e
self.exit_code = e.code
except Exception as e:
self.exception = e
self.exit_code = -1
else:
self.project = project_dir
@pytest.fixture
def cookies(request, tmpdir):
output_dir = request.config.option.output_dir
if not output_dir:
output_dir = str(tmpdir.mkdir('cookies_output'))
_cookies = Cookies('.', output_dir)
return _cookies
def pytest_addoption(parser):
group = parser.getgroup('cookies')
group.addoption(
'--output-dir',
action='store',
dest='output_dir',
help='Set the output directory for Cookiecutter'
)
parser.addini('HELLO', 'Dummy pytest.ini setting')
| <commit_before># -*- coding: utf-8 -*-
import pytest
from cookiecutter.main import cookiecutter
class Cookies(object):
"""Class to provide convenient access to the cookiecutter API."""
error = None
project = None
def __init__(self, template, output_dir):
self._template = template
self._output_dir = output_dir
def bake(self, extra_context=None):
try:
project_dir = cookiecutter(
self._template,
no_input=True,
extra_context=extra_context,
output_dir=self._output_dir
)
except Exception as e:
self.error = e
else:
self.project = project_dir
@pytest.fixture
def cookies(request, tmpdir):
output_dir = request.config.option.output_dir
if not output_dir:
output_dir = str(tmpdir.mkdir('cookies_output'))
_cookies = Cookies('.', output_dir)
return _cookies
def pytest_addoption(parser):
group = parser.getgroup('cookies')
group.addoption(
'--output-dir',
action='store',
dest='output_dir',
help='Set the output directory for Cookiecutter'
)
parser.addini('HELLO', 'Dummy pytest.ini setting')
<commit_msg>Handle SystemExit errors and add exit_code<commit_after># -*- coding: utf-8 -*-
import pytest
from cookiecutter.main import cookiecutter
class Cookies(object):
"""Class to provide convenient access to the cookiecutter API."""
exception = None
exit_code = 0
project = None
def __init__(self, template, output_dir):
self._template = template
self._output_dir = output_dir
def bake(self, extra_context=None):
try:
project_dir = cookiecutter(
self._template,
no_input=True,
extra_context=extra_context,
output_dir=self._output_dir
)
except SystemExit as e:
if e.code != 0:
self.exception = e
self.exit_code = e.code
except Exception as e:
self.exception = e
self.exit_code = -1
else:
self.project = project_dir
@pytest.fixture
def cookies(request, tmpdir):
output_dir = request.config.option.output_dir
if not output_dir:
output_dir = str(tmpdir.mkdir('cookies_output'))
_cookies = Cookies('.', output_dir)
return _cookies
def pytest_addoption(parser):
group = parser.getgroup('cookies')
group.addoption(
'--output-dir',
action='store',
dest='output_dir',
help='Set the output directory for Cookiecutter'
)
parser.addini('HELLO', 'Dummy pytest.ini setting')
|
4baa59fd3ea90b604a8cdb5f4daf9ddb61f2e34a | aybu/manager/cli/task.py | aybu/manager/cli/task.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2010 Asidev s.r.l.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from . interface import BaseInterface
class TaskInterface(BaseInterface):
commands = ['list', 'logs', 'delete', 'flush', 'info', 'flush_logs']
name = 'tasks'
def logs(self, task):
try:
response, content = self.api.get(self.get_url(task, 'logs'))
except ValueError:
pass
else:
for log in content:
print log.strip()
def flush(self):
self.api.delete(self.root_url, quiet=False)
def flush_logs(self, task):
self.api.delete(self.get_url(task, 'logs'), quiet=False)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2010 Asidev s.r.l.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from . interface import BaseInterface
class TaskInterface(BaseInterface):
commands = ['list', 'logs', 'delete', 'flush', 'info', 'flush_logs']
name = 'tasks'
def logs(self, task):
response, content = self.api.get(self.get_url(task, 'logs'))
if content:
for log in content:
print log.strip()
def flush(self):
self.api.delete(self.root_url, quiet=False)
def flush_logs(self, task):
self.api.delete(self.get_url(task, 'logs'), quiet=False)
| Fix error on connection failures | Fix error on connection failures
| Python | apache-2.0 | asidev/aybu-manager-cli,asidev/aybu-manager-cli | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2010 Asidev s.r.l.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from . interface import BaseInterface
class TaskInterface(BaseInterface):
commands = ['list', 'logs', 'delete', 'flush', 'info', 'flush_logs']
name = 'tasks'
def logs(self, task):
try:
response, content = self.api.get(self.get_url(task, 'logs'))
except ValueError:
pass
else:
for log in content:
print log.strip()
def flush(self):
self.api.delete(self.root_url, quiet=False)
def flush_logs(self, task):
self.api.delete(self.get_url(task, 'logs'), quiet=False)
Fix error on connection failures | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2010 Asidev s.r.l.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from . interface import BaseInterface
class TaskInterface(BaseInterface):
commands = ['list', 'logs', 'delete', 'flush', 'info', 'flush_logs']
name = 'tasks'
def logs(self, task):
response, content = self.api.get(self.get_url(task, 'logs'))
if content:
for log in content:
print log.strip()
def flush(self):
self.api.delete(self.root_url, quiet=False)
def flush_logs(self, task):
self.api.delete(self.get_url(task, 'logs'), quiet=False)
| <commit_before>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2010 Asidev s.r.l.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from . interface import BaseInterface
class TaskInterface(BaseInterface):
commands = ['list', 'logs', 'delete', 'flush', 'info', 'flush_logs']
name = 'tasks'
def logs(self, task):
try:
response, content = self.api.get(self.get_url(task, 'logs'))
except ValueError:
pass
else:
for log in content:
print log.strip()
def flush(self):
self.api.delete(self.root_url, quiet=False)
def flush_logs(self, task):
self.api.delete(self.get_url(task, 'logs'), quiet=False)
<commit_msg>Fix error on connection failures<commit_after> | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2010 Asidev s.r.l.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from . interface import BaseInterface
class TaskInterface(BaseInterface):
commands = ['list', 'logs', 'delete', 'flush', 'info', 'flush_logs']
name = 'tasks'
def logs(self, task):
response, content = self.api.get(self.get_url(task, 'logs'))
if content:
for log in content:
print log.strip()
def flush(self):
self.api.delete(self.root_url, quiet=False)
def flush_logs(self, task):
self.api.delete(self.get_url(task, 'logs'), quiet=False)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2010 Asidev s.r.l.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from . interface import BaseInterface
class TaskInterface(BaseInterface):
commands = ['list', 'logs', 'delete', 'flush', 'info', 'flush_logs']
name = 'tasks'
def logs(self, task):
try:
response, content = self.api.get(self.get_url(task, 'logs'))
except ValueError:
pass
else:
for log in content:
print log.strip()
def flush(self):
self.api.delete(self.root_url, quiet=False)
def flush_logs(self, task):
self.api.delete(self.get_url(task, 'logs'), quiet=False)
Fix error on connection failures#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2010 Asidev s.r.l.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from . interface import BaseInterface
class TaskInterface(BaseInterface):
commands = ['list', 'logs', 'delete', 'flush', 'info', 'flush_logs']
name = 'tasks'
def logs(self, task):
response, content = self.api.get(self.get_url(task, 'logs'))
if content:
for log in content:
print log.strip()
def flush(self):
self.api.delete(self.root_url, quiet=False)
def flush_logs(self, task):
self.api.delete(self.get_url(task, 'logs'), quiet=False)
| <commit_before>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2010 Asidev s.r.l.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from . interface import BaseInterface
class TaskInterface(BaseInterface):
commands = ['list', 'logs', 'delete', 'flush', 'info', 'flush_logs']
name = 'tasks'
def logs(self, task):
try:
response, content = self.api.get(self.get_url(task, 'logs'))
except ValueError:
pass
else:
for log in content:
print log.strip()
def flush(self):
self.api.delete(self.root_url, quiet=False)
def flush_logs(self, task):
self.api.delete(self.get_url(task, 'logs'), quiet=False)
<commit_msg>Fix error on connection failures<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2010 Asidev s.r.l.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from . interface import BaseInterface
class TaskInterface(BaseInterface):
commands = ['list', 'logs', 'delete', 'flush', 'info', 'flush_logs']
name = 'tasks'
def logs(self, task):
response, content = self.api.get(self.get_url(task, 'logs'))
if content:
for log in content:
print log.strip()
def flush(self):
self.api.delete(self.root_url, quiet=False)
def flush_logs(self, task):
self.api.delete(self.get_url(task, 'logs'), quiet=False)
|
f696e320d66f375779692ec073f15d3d6d466059 | edx_data_research/parsing/parse_sql.py | edx_data_research/parsing/parse_sql.py | import subprocess
from edx_data_research.parsing.parse import Parse
class SQL(Parse):
def __init__(self, args):
super(SQL, self).__init__(args)
self._collections = args.collection
self.sql_file = args.sql_file
def migrate(self):
subprocess.check_call(['mongoimport', '-d', self.db_name, '-c',
self._collections, '--type', 'tsv', '--file',
self.sql_file, '--headerline'])
| import subprocess
from edx_data_research.parsing.parse import Parse
class SQL(Parse):
def __init__(self, args):
super(SQL, self).__init__(args)
self._collections = args.collection
self.sql_file = args.sql_file
def migrate(self):
subprocess.check_call(['mongoimport', '-d', self.db_name, '-c',
self._collections, '--type', 'tsv', '--file',
self.sql_file, '--headerline', '--drop'])
| Update mongo import of files to drop existing collection first | Update mongo import of files to drop existing collection first
| Python | mit | McGillX/edx_data_research,McGillX/edx_data_research,McGillX/edx_data_research | import subprocess
from edx_data_research.parsing.parse import Parse
class SQL(Parse):
def __init__(self, args):
super(SQL, self).__init__(args)
self._collections = args.collection
self.sql_file = args.sql_file
def migrate(self):
subprocess.check_call(['mongoimport', '-d', self.db_name, '-c',
self._collections, '--type', 'tsv', '--file',
self.sql_file, '--headerline'])
Update mongo import of files to drop existing collection first | import subprocess
from edx_data_research.parsing.parse import Parse
class SQL(Parse):
def __init__(self, args):
super(SQL, self).__init__(args)
self._collections = args.collection
self.sql_file = args.sql_file
def migrate(self):
subprocess.check_call(['mongoimport', '-d', self.db_name, '-c',
self._collections, '--type', 'tsv', '--file',
self.sql_file, '--headerline', '--drop'])
| <commit_before>import subprocess
from edx_data_research.parsing.parse import Parse
class SQL(Parse):
def __init__(self, args):
super(SQL, self).__init__(args)
self._collections = args.collection
self.sql_file = args.sql_file
def migrate(self):
subprocess.check_call(['mongoimport', '-d', self.db_name, '-c',
self._collections, '--type', 'tsv', '--file',
self.sql_file, '--headerline'])
<commit_msg>Update mongo import of files to drop existing collection first<commit_after> | import subprocess
from edx_data_research.parsing.parse import Parse
class SQL(Parse):
def __init__(self, args):
super(SQL, self).__init__(args)
self._collections = args.collection
self.sql_file = args.sql_file
def migrate(self):
subprocess.check_call(['mongoimport', '-d', self.db_name, '-c',
self._collections, '--type', 'tsv', '--file',
self.sql_file, '--headerline', '--drop'])
| import subprocess
from edx_data_research.parsing.parse import Parse
class SQL(Parse):
def __init__(self, args):
super(SQL, self).__init__(args)
self._collections = args.collection
self.sql_file = args.sql_file
def migrate(self):
subprocess.check_call(['mongoimport', '-d', self.db_name, '-c',
self._collections, '--type', 'tsv', '--file',
self.sql_file, '--headerline'])
Update mongo import of files to drop existing collection firstimport subprocess
from edx_data_research.parsing.parse import Parse
class SQL(Parse):
def __init__(self, args):
super(SQL, self).__init__(args)
self._collections = args.collection
self.sql_file = args.sql_file
def migrate(self):
subprocess.check_call(['mongoimport', '-d', self.db_name, '-c',
self._collections, '--type', 'tsv', '--file',
self.sql_file, '--headerline', '--drop'])
| <commit_before>import subprocess
from edx_data_research.parsing.parse import Parse
class SQL(Parse):
def __init__(self, args):
super(SQL, self).__init__(args)
self._collections = args.collection
self.sql_file = args.sql_file
def migrate(self):
subprocess.check_call(['mongoimport', '-d', self.db_name, '-c',
self._collections, '--type', 'tsv', '--file',
self.sql_file, '--headerline'])
<commit_msg>Update mongo import of files to drop existing collection first<commit_after>import subprocess
from edx_data_research.parsing.parse import Parse
class SQL(Parse):
def __init__(self, args):
super(SQL, self).__init__(args)
self._collections = args.collection
self.sql_file = args.sql_file
def migrate(self):
subprocess.check_call(['mongoimport', '-d', self.db_name, '-c',
self._collections, '--type', 'tsv', '--file',
self.sql_file, '--headerline', '--drop'])
|
8a44705413d3a01e897d4a922e7c1383b60a2927 | plugins/VersionUpgrade/VersionUpgrade21to22/__init__.py | plugins/VersionUpgrade/VersionUpgrade21to22/__init__.py | # Copyright (c) 2016 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
from . import VersionUpgrade21to22
from UM.i18n import i18nCatalog
catalog = i18nCatalog("cura")
def getMetaData():
return {
"plugin": {
"name": catalog.i18nc("@label", "Version Upgrade 2.1 to 2.2"),
"author": "Ultimaker",
"version": "1.0",
"description": catalog.i18nc("@info:whatsthis", "Upgrades configurations from Cura 2.1 to Cura 2.2."),
"api": 2
},
"version_upgrade": {
"profile": {
"from": 1,
"to": 2
},
"machine_instance": {
"from": 1,
"to": 2
}
}
}
def register(app):
return { "version_upgrade": VersionUpgrade21to22.VersionUpgrade21to22() }
| # Copyright (c) 2016 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
from . import VersionUpgrade21to22
from UM.i18n import i18nCatalog
catalog = i18nCatalog("cura")
def getMetaData():
return {
"plugin": {
"name": catalog.i18nc("@label", "Version Upgrade 2.1 to 2.2"),
"author": "Ultimaker",
"version": "1.0",
"description": catalog.i18nc("@info:whatsthis", "Upgrades configurations from Cura 2.1 to Cura 2.2."),
"api": 2
},
"version_upgrade": {
# From To Upgrade function
("profile", 1): ("instance_container", 2, VersionUpgrade21to22.upgradeProfile),
("machine_instance", 1): ("container_stack", 2, VersionUpgrade21to22.upgradeMachineInstance),
("preferences", 1): ("preferences", 2, VersionUpgrade21to22.upgradePreferences)
},
"sources": {
"profile": {
"get_version": VersionUpgrade21to22.getCfgVersion,
"location": {"./profiles"}
},
"machine_instance": {
"get_version": VersionUpgrade21to22.getCfgVersion,
"location": {"./machine_instances"}
},
"preferences": {
"get_version": VersionUpgrade21to22.getCfgVersion,
"location": {"."}
}
}
}
def register(app):
return { "version_upgrade": VersionUpgrade21to22.VersionUpgrade21to22() }
| Update metadata with dynamic config types | Update metadata with dynamic config types
After settings rework, we decided to make the upgrade plug-ins define their own configuration types. This is basically the definition for these configuration types. Only the get_version function is not yet implemented.
Contributes to issue CURA-844.
| Python | agpl-3.0 | Curahelper/Cura,ynotstartups/Wanhao,hmflash/Cura,totalretribution/Cura,hmflash/Cura,Curahelper/Cura,senttech/Cura,fieldOfView/Cura,senttech/Cura,fieldOfView/Cura,totalretribution/Cura,ynotstartups/Wanhao | # Copyright (c) 2016 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
from . import VersionUpgrade21to22
from UM.i18n import i18nCatalog
catalog = i18nCatalog("cura")
def getMetaData():
return {
"plugin": {
"name": catalog.i18nc("@label", "Version Upgrade 2.1 to 2.2"),
"author": "Ultimaker",
"version": "1.0",
"description": catalog.i18nc("@info:whatsthis", "Upgrades configurations from Cura 2.1 to Cura 2.2."),
"api": 2
},
"version_upgrade": {
"profile": {
"from": 1,
"to": 2
},
"machine_instance": {
"from": 1,
"to": 2
}
}
}
def register(app):
return { "version_upgrade": VersionUpgrade21to22.VersionUpgrade21to22() }
Update metadata with dynamic config types
After settings rework, we decided to make the upgrade plug-ins define their own configuration types. This is basically the definition for these configuration types. Only the get_version function is not yet implemented.
Contributes to issue CURA-844. | # Copyright (c) 2016 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
from . import VersionUpgrade21to22
from UM.i18n import i18nCatalog
catalog = i18nCatalog("cura")
def getMetaData():
return {
"plugin": {
"name": catalog.i18nc("@label", "Version Upgrade 2.1 to 2.2"),
"author": "Ultimaker",
"version": "1.0",
"description": catalog.i18nc("@info:whatsthis", "Upgrades configurations from Cura 2.1 to Cura 2.2."),
"api": 2
},
"version_upgrade": {
# From To Upgrade function
("profile", 1): ("instance_container", 2, VersionUpgrade21to22.upgradeProfile),
("machine_instance", 1): ("container_stack", 2, VersionUpgrade21to22.upgradeMachineInstance),
("preferences", 1): ("preferences", 2, VersionUpgrade21to22.upgradePreferences)
},
"sources": {
"profile": {
"get_version": VersionUpgrade21to22.getCfgVersion,
"location": {"./profiles"}
},
"machine_instance": {
"get_version": VersionUpgrade21to22.getCfgVersion,
"location": {"./machine_instances"}
},
"preferences": {
"get_version": VersionUpgrade21to22.getCfgVersion,
"location": {"."}
}
}
}
def register(app):
return { "version_upgrade": VersionUpgrade21to22.VersionUpgrade21to22() }
| <commit_before># Copyright (c) 2016 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
from . import VersionUpgrade21to22
from UM.i18n import i18nCatalog
catalog = i18nCatalog("cura")
def getMetaData():
return {
"plugin": {
"name": catalog.i18nc("@label", "Version Upgrade 2.1 to 2.2"),
"author": "Ultimaker",
"version": "1.0",
"description": catalog.i18nc("@info:whatsthis", "Upgrades configurations from Cura 2.1 to Cura 2.2."),
"api": 2
},
"version_upgrade": {
"profile": {
"from": 1,
"to": 2
},
"machine_instance": {
"from": 1,
"to": 2
}
}
}
def register(app):
return { "version_upgrade": VersionUpgrade21to22.VersionUpgrade21to22() }
<commit_msg>Update metadata with dynamic config types
After settings rework, we decided to make the upgrade plug-ins define their own configuration types. This is basically the definition for these configuration types. Only the get_version function is not yet implemented.
Contributes to issue CURA-844.<commit_after> | # Copyright (c) 2016 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
from . import VersionUpgrade21to22
from UM.i18n import i18nCatalog
catalog = i18nCatalog("cura")
def getMetaData():
return {
"plugin": {
"name": catalog.i18nc("@label", "Version Upgrade 2.1 to 2.2"),
"author": "Ultimaker",
"version": "1.0",
"description": catalog.i18nc("@info:whatsthis", "Upgrades configurations from Cura 2.1 to Cura 2.2."),
"api": 2
},
"version_upgrade": {
# From To Upgrade function
("profile", 1): ("instance_container", 2, VersionUpgrade21to22.upgradeProfile),
("machine_instance", 1): ("container_stack", 2, VersionUpgrade21to22.upgradeMachineInstance),
("preferences", 1): ("preferences", 2, VersionUpgrade21to22.upgradePreferences)
},
"sources": {
"profile": {
"get_version": VersionUpgrade21to22.getCfgVersion,
"location": {"./profiles"}
},
"machine_instance": {
"get_version": VersionUpgrade21to22.getCfgVersion,
"location": {"./machine_instances"}
},
"preferences": {
"get_version": VersionUpgrade21to22.getCfgVersion,
"location": {"."}
}
}
}
def register(app):
return { "version_upgrade": VersionUpgrade21to22.VersionUpgrade21to22() }
| # Copyright (c) 2016 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
from . import VersionUpgrade21to22
from UM.i18n import i18nCatalog
catalog = i18nCatalog("cura")
def getMetaData():
return {
"plugin": {
"name": catalog.i18nc("@label", "Version Upgrade 2.1 to 2.2"),
"author": "Ultimaker",
"version": "1.0",
"description": catalog.i18nc("@info:whatsthis", "Upgrades configurations from Cura 2.1 to Cura 2.2."),
"api": 2
},
"version_upgrade": {
"profile": {
"from": 1,
"to": 2
},
"machine_instance": {
"from": 1,
"to": 2
}
}
}
def register(app):
return { "version_upgrade": VersionUpgrade21to22.VersionUpgrade21to22() }
Update metadata with dynamic config types
After settings rework, we decided to make the upgrade plug-ins define their own configuration types. This is basically the definition for these configuration types. Only the get_version function is not yet implemented.
Contributes to issue CURA-844.# Copyright (c) 2016 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
from . import VersionUpgrade21to22
from UM.i18n import i18nCatalog
catalog = i18nCatalog("cura")
def getMetaData():
return {
"plugin": {
"name": catalog.i18nc("@label", "Version Upgrade 2.1 to 2.2"),
"author": "Ultimaker",
"version": "1.0",
"description": catalog.i18nc("@info:whatsthis", "Upgrades configurations from Cura 2.1 to Cura 2.2."),
"api": 2
},
"version_upgrade": {
# From To Upgrade function
("profile", 1): ("instance_container", 2, VersionUpgrade21to22.upgradeProfile),
("machine_instance", 1): ("container_stack", 2, VersionUpgrade21to22.upgradeMachineInstance),
("preferences", 1): ("preferences", 2, VersionUpgrade21to22.upgradePreferences)
},
"sources": {
"profile": {
"get_version": VersionUpgrade21to22.getCfgVersion,
"location": {"./profiles"}
},
"machine_instance": {
"get_version": VersionUpgrade21to22.getCfgVersion,
"location": {"./machine_instances"}
},
"preferences": {
"get_version": VersionUpgrade21to22.getCfgVersion,
"location": {"."}
}
}
}
def register(app):
return { "version_upgrade": VersionUpgrade21to22.VersionUpgrade21to22() }
| <commit_before># Copyright (c) 2016 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
from . import VersionUpgrade21to22
from UM.i18n import i18nCatalog
catalog = i18nCatalog("cura")
def getMetaData():
return {
"plugin": {
"name": catalog.i18nc("@label", "Version Upgrade 2.1 to 2.2"),
"author": "Ultimaker",
"version": "1.0",
"description": catalog.i18nc("@info:whatsthis", "Upgrades configurations from Cura 2.1 to Cura 2.2."),
"api": 2
},
"version_upgrade": {
"profile": {
"from": 1,
"to": 2
},
"machine_instance": {
"from": 1,
"to": 2
}
}
}
def register(app):
return { "version_upgrade": VersionUpgrade21to22.VersionUpgrade21to22() }
<commit_msg>Update metadata with dynamic config types
After settings rework, we decided to make the upgrade plug-ins define their own configuration types. This is basically the definition for these configuration types. Only the get_version function is not yet implemented.
Contributes to issue CURA-844.<commit_after># Copyright (c) 2016 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
from . import VersionUpgrade21to22
from UM.i18n import i18nCatalog
catalog = i18nCatalog("cura")
def getMetaData():
return {
"plugin": {
"name": catalog.i18nc("@label", "Version Upgrade 2.1 to 2.2"),
"author": "Ultimaker",
"version": "1.0",
"description": catalog.i18nc("@info:whatsthis", "Upgrades configurations from Cura 2.1 to Cura 2.2."),
"api": 2
},
"version_upgrade": {
# From To Upgrade function
("profile", 1): ("instance_container", 2, VersionUpgrade21to22.upgradeProfile),
("machine_instance", 1): ("container_stack", 2, VersionUpgrade21to22.upgradeMachineInstance),
("preferences", 1): ("preferences", 2, VersionUpgrade21to22.upgradePreferences)
},
"sources": {
"profile": {
"get_version": VersionUpgrade21to22.getCfgVersion,
"location": {"./profiles"}
},
"machine_instance": {
"get_version": VersionUpgrade21to22.getCfgVersion,
"location": {"./machine_instances"}
},
"preferences": {
"get_version": VersionUpgrade21to22.getCfgVersion,
"location": {"."}
}
}
}
def register(app):
return { "version_upgrade": VersionUpgrade21to22.VersionUpgrade21to22() }
|
3e2c7ca2147b28403761cf57dad6d9173a28dc3d | docs/tasks.py | docs/tasks.py | import invoke
import livereload
import shutil
server = livereload.Server()
@invoke.task
def clean():
shutil.rmtree('./build')
@invoke.task(pre=[clean])
def build():
invoke.run('sphinx-build ./source ./build', pty=True)
@invoke.task(pre=[build])
def serve():
server.watch('./source/*', build)
server.watch('./source/**/*', build)
server.serve(
root='./build',
host='localhost',
liveport=35729,
port=8080
)
| import invoke
import livereload
import shutil
server = livereload.Server()
@invoke.task
def clean():
shutil.rmtree('./build')
@invoke.task(pre=[clean])
def build():
invoke.run('sphinx-build ./source ./build', pty=True)
@invoke.task(pre=[build])
def serve():
server.watch('../reqon/', build)
server.watch('./source/', build)
server.serve(
root='./build',
host='localhost',
liveport=35729,
port=8080
)
| Fix directory watching when serving the docs. | Fix directory watching when serving the docs.
| Python | mit | dmpayton/reqon | import invoke
import livereload
import shutil
server = livereload.Server()
@invoke.task
def clean():
shutil.rmtree('./build')
@invoke.task(pre=[clean])
def build():
invoke.run('sphinx-build ./source ./build', pty=True)
@invoke.task(pre=[build])
def serve():
server.watch('./source/*', build)
server.watch('./source/**/*', build)
server.serve(
root='./build',
host='localhost',
liveport=35729,
port=8080
)
Fix directory watching when serving the docs. | import invoke
import livereload
import shutil
server = livereload.Server()
@invoke.task
def clean():
shutil.rmtree('./build')
@invoke.task(pre=[clean])
def build():
invoke.run('sphinx-build ./source ./build', pty=True)
@invoke.task(pre=[build])
def serve():
server.watch('../reqon/', build)
server.watch('./source/', build)
server.serve(
root='./build',
host='localhost',
liveport=35729,
port=8080
)
| <commit_before>import invoke
import livereload
import shutil
server = livereload.Server()
@invoke.task
def clean():
shutil.rmtree('./build')
@invoke.task(pre=[clean])
def build():
invoke.run('sphinx-build ./source ./build', pty=True)
@invoke.task(pre=[build])
def serve():
server.watch('./source/*', build)
server.watch('./source/**/*', build)
server.serve(
root='./build',
host='localhost',
liveport=35729,
port=8080
)
<commit_msg>Fix directory watching when serving the docs.<commit_after> | import invoke
import livereload
import shutil
server = livereload.Server()
@invoke.task
def clean():
shutil.rmtree('./build')
@invoke.task(pre=[clean])
def build():
invoke.run('sphinx-build ./source ./build', pty=True)
@invoke.task(pre=[build])
def serve():
server.watch('../reqon/', build)
server.watch('./source/', build)
server.serve(
root='./build',
host='localhost',
liveport=35729,
port=8080
)
| import invoke
import livereload
import shutil
server = livereload.Server()
@invoke.task
def clean():
shutil.rmtree('./build')
@invoke.task(pre=[clean])
def build():
invoke.run('sphinx-build ./source ./build', pty=True)
@invoke.task(pre=[build])
def serve():
server.watch('./source/*', build)
server.watch('./source/**/*', build)
server.serve(
root='./build',
host='localhost',
liveport=35729,
port=8080
)
Fix directory watching when serving the docs.import invoke
import livereload
import shutil
server = livereload.Server()
@invoke.task
def clean():
shutil.rmtree('./build')
@invoke.task(pre=[clean])
def build():
invoke.run('sphinx-build ./source ./build', pty=True)
@invoke.task(pre=[build])
def serve():
server.watch('../reqon/', build)
server.watch('./source/', build)
server.serve(
root='./build',
host='localhost',
liveport=35729,
port=8080
)
| <commit_before>import invoke
import livereload
import shutil
server = livereload.Server()
@invoke.task
def clean():
shutil.rmtree('./build')
@invoke.task(pre=[clean])
def build():
invoke.run('sphinx-build ./source ./build', pty=True)
@invoke.task(pre=[build])
def serve():
server.watch('./source/*', build)
server.watch('./source/**/*', build)
server.serve(
root='./build',
host='localhost',
liveport=35729,
port=8080
)
<commit_msg>Fix directory watching when serving the docs.<commit_after>import invoke
import livereload
import shutil
server = livereload.Server()
@invoke.task
def clean():
shutil.rmtree('./build')
@invoke.task(pre=[clean])
def build():
invoke.run('sphinx-build ./source ./build', pty=True)
@invoke.task(pre=[build])
def serve():
server.watch('../reqon/', build)
server.watch('./source/', build)
server.serve(
root='./build',
host='localhost',
liveport=35729,
port=8080
)
|
808434912ceb176793a559464f767e2eb52b889d | sanic_limiter/util.py | sanic_limiter/util.py | """
"""
def get_remote_address(request):
"""
:param: request: request object of sanic
:return: the ip address of given request (or 127.0.0.1 if none found)
"""
return request.ip[0] or '127.0.0.1'
| """
"""
def get_remote_address(request):
"""
:param: request: request object of sanic
:return: the ip address of given request (or 127.0.0.1 if none found)
"""
return request.remote_addr or request.ip
| Fix IP keyfunc for reverse proxies | Fix IP keyfunc for reverse proxies
| Python | mit | bohea/sanic-limiter | """
"""
def get_remote_address(request):
"""
:param: request: request object of sanic
:return: the ip address of given request (or 127.0.0.1 if none found)
"""
return request.ip[0] or '127.0.0.1'
Fix IP keyfunc for reverse proxies | """
"""
def get_remote_address(request):
"""
:param: request: request object of sanic
:return: the ip address of given request (or 127.0.0.1 if none found)
"""
return request.remote_addr or request.ip
| <commit_before>"""
"""
def get_remote_address(request):
"""
:param: request: request object of sanic
:return: the ip address of given request (or 127.0.0.1 if none found)
"""
return request.ip[0] or '127.0.0.1'
<commit_msg>Fix IP keyfunc for reverse proxies<commit_after> | """
"""
def get_remote_address(request):
"""
:param: request: request object of sanic
:return: the ip address of given request (or 127.0.0.1 if none found)
"""
return request.remote_addr or request.ip
| """
"""
def get_remote_address(request):
"""
:param: request: request object of sanic
:return: the ip address of given request (or 127.0.0.1 if none found)
"""
return request.ip[0] or '127.0.0.1'
Fix IP keyfunc for reverse proxies"""
"""
def get_remote_address(request):
"""
:param: request: request object of sanic
:return: the ip address of given request (or 127.0.0.1 if none found)
"""
return request.remote_addr or request.ip
| <commit_before>"""
"""
def get_remote_address(request):
"""
:param: request: request object of sanic
:return: the ip address of given request (or 127.0.0.1 if none found)
"""
return request.ip[0] or '127.0.0.1'
<commit_msg>Fix IP keyfunc for reverse proxies<commit_after>"""
"""
def get_remote_address(request):
"""
:param: request: request object of sanic
:return: the ip address of given request (or 127.0.0.1 if none found)
"""
return request.remote_addr or request.ip
|
093c8ac40ba6154ee4a3d3d1430e5b05e68b2e9e | timpani/webserver/webhelpers.py | timpani/webserver/webhelpers.py | import flask
from .. import auth
import urllib.parse
def checkForSession():
if "uid" in flask.session:
session = auth.validateSession(flask.session["uid"])
if session != None:
return session
return None
def redirectAndSave(path):
flask.session["donePage"] = urllib.parse.urlparse(flask.request.url).path
return response
def markRedirectAsRecovered():
if "donePage" in flask.session:
del flask.session["donePage"]
else:
raise KeyError("No redirect to be recovered from.")
def canRecoverFromRedirect():
if "donePage" in flask.session:
return flask.session["donePage"]
return None
| import flask
from .. import auth
import urllib.parse
def checkForSession():
if "uid" in flask.session:
session = auth.validateSession(flask.session["uid"])
if session != None:
return session
return None
def redirectAndSave(path):
flask.session["donePage"] = urllib.parse.urlparse(flask.request.url).path
return flask.redirect(path)
def markRedirectAsRecovered():
if "donePage" in flask.session:
del flask.session["donePage"]
else:
raise KeyError("No redirect to be recovered from.")
def canRecoverFromRedirect():
if "donePage" in flask.session:
return flask.session["donePage"]
return None
| Fix legacy return in redirectAndSave | Fix legacy return in redirectAndSave
| Python | mit | ollien/Timpani,ollien/Timpani,ollien/Timpani | import flask
from .. import auth
import urllib.parse
def checkForSession():
if "uid" in flask.session:
session = auth.validateSession(flask.session["uid"])
if session != None:
return session
return None
def redirectAndSave(path):
flask.session["donePage"] = urllib.parse.urlparse(flask.request.url).path
return response
def markRedirectAsRecovered():
if "donePage" in flask.session:
del flask.session["donePage"]
else:
raise KeyError("No redirect to be recovered from.")
def canRecoverFromRedirect():
if "donePage" in flask.session:
return flask.session["donePage"]
return None
Fix legacy return in redirectAndSave | import flask
from .. import auth
import urllib.parse
def checkForSession():
if "uid" in flask.session:
session = auth.validateSession(flask.session["uid"])
if session != None:
return session
return None
def redirectAndSave(path):
flask.session["donePage"] = urllib.parse.urlparse(flask.request.url).path
return flask.redirect(path)
def markRedirectAsRecovered():
if "donePage" in flask.session:
del flask.session["donePage"]
else:
raise KeyError("No redirect to be recovered from.")
def canRecoverFromRedirect():
if "donePage" in flask.session:
return flask.session["donePage"]
return None
| <commit_before>import flask
from .. import auth
import urllib.parse
def checkForSession():
if "uid" in flask.session:
session = auth.validateSession(flask.session["uid"])
if session != None:
return session
return None
def redirectAndSave(path):
flask.session["donePage"] = urllib.parse.urlparse(flask.request.url).path
return response
def markRedirectAsRecovered():
if "donePage" in flask.session:
del flask.session["donePage"]
else:
raise KeyError("No redirect to be recovered from.")
def canRecoverFromRedirect():
if "donePage" in flask.session:
return flask.session["donePage"]
return None
<commit_msg>Fix legacy return in redirectAndSave<commit_after> | import flask
from .. import auth
import urllib.parse
def checkForSession():
if "uid" in flask.session:
session = auth.validateSession(flask.session["uid"])
if session != None:
return session
return None
def redirectAndSave(path):
flask.session["donePage"] = urllib.parse.urlparse(flask.request.url).path
return flask.redirect(path)
def markRedirectAsRecovered():
if "donePage" in flask.session:
del flask.session["donePage"]
else:
raise KeyError("No redirect to be recovered from.")
def canRecoverFromRedirect():
if "donePage" in flask.session:
return flask.session["donePage"]
return None
| import flask
from .. import auth
import urllib.parse
def checkForSession():
if "uid" in flask.session:
session = auth.validateSession(flask.session["uid"])
if session != None:
return session
return None
def redirectAndSave(path):
flask.session["donePage"] = urllib.parse.urlparse(flask.request.url).path
return response
def markRedirectAsRecovered():
if "donePage" in flask.session:
del flask.session["donePage"]
else:
raise KeyError("No redirect to be recovered from.")
def canRecoverFromRedirect():
if "donePage" in flask.session:
return flask.session["donePage"]
return None
Fix legacy return in redirectAndSaveimport flask
from .. import auth
import urllib.parse
def checkForSession():
if "uid" in flask.session:
session = auth.validateSession(flask.session["uid"])
if session != None:
return session
return None
def redirectAndSave(path):
flask.session["donePage"] = urllib.parse.urlparse(flask.request.url).path
return flask.redirect(path)
def markRedirectAsRecovered():
if "donePage" in flask.session:
del flask.session["donePage"]
else:
raise KeyError("No redirect to be recovered from.")
def canRecoverFromRedirect():
if "donePage" in flask.session:
return flask.session["donePage"]
return None
| <commit_before>import flask
from .. import auth
import urllib.parse
def checkForSession():
if "uid" in flask.session:
session = auth.validateSession(flask.session["uid"])
if session != None:
return session
return None
def redirectAndSave(path):
flask.session["donePage"] = urllib.parse.urlparse(flask.request.url).path
return response
def markRedirectAsRecovered():
if "donePage" in flask.session:
del flask.session["donePage"]
else:
raise KeyError("No redirect to be recovered from.")
def canRecoverFromRedirect():
if "donePage" in flask.session:
return flask.session["donePage"]
return None
<commit_msg>Fix legacy return in redirectAndSave<commit_after>import flask
from .. import auth
import urllib.parse
def checkForSession():
if "uid" in flask.session:
session = auth.validateSession(flask.session["uid"])
if session != None:
return session
return None
def redirectAndSave(path):
flask.session["donePage"] = urllib.parse.urlparse(flask.request.url).path
return flask.redirect(path)
def markRedirectAsRecovered():
if "donePage" in flask.session:
del flask.session["donePage"]
else:
raise KeyError("No redirect to be recovered from.")
def canRecoverFromRedirect():
if "donePage" in flask.session:
return flask.session["donePage"]
return None
|
788ca6bbd25bcdb55dc92739e7d08b201344b10b | tools/glidein_top.py | tools/glidein_top.py | #!/bin/env python
#
# glidein_top
#
# Execute a top command in the same glidein as the user job
#
# Usage:
# glidein_top.py <cluster>.<process> [-name <schedd_name>] [-pool <pool_name> ] [-timeout <nr secs>
#
import os
import stat
import sys
sys.path.append("lib")
sys.path.append("../lib")
import glideinMonitor
def createTopMonitorFile(monitor_file_name,monitor_control_relname,
argv,condor_status):
fd=open(monitor_file_name,"w")
try:
fd.write("#!/bin/sh\n")
fd.write("top -b -n 1\n")
fd.write("echo Done > %s\n"%monitor_control_relname)
finally:
fd.close()
os.chmod(monitor_file_name,stat.S_IRWXU)
args=glideinMonitor.parseArgs(sys.argv[1:])
if len(args['argv'])!=0:
raise RuntimeError, "Unexpected parameters starting with %s found!"%args['argv'][0]
glideinMonitor.monitor(args['jid'],args['schedd_name'],args['pool_name'],
args['timeout'],
createTopMonitorFile,args['argv'])
| #!/bin/env python
#
# glidein_top.py
#
# Description:
# Execute a top command on a condor job
#
# Usage:
# glidein_top.py <cluster>.<process> [-name <schedd_name>] [-pool <pool_name> ] [-timeout <nr secs>]
#
# Author:
# Igor Sfiligoi (May 2007)
#
# License:
# Fermitools
#
import sys,os.path
sys.path.append(os.path.join(sys.path[0],"lib"))
sys.path.append(os.path.join(sys.path[0],"../lib"))
import glideinCmd
def argv_top(argv):
if len(argv)!=0:
raise RuntimeError, "Unexpected parameters starting with %s found!"%argv[0]
return ['top', '-b', '-n', '1']
glideinCmd.exe_cmd(argv_top)
| Change rel paths into abspaths and use helper module | Change rel paths into abspaths and use helper module
| Python | bsd-3-clause | holzman/glideinwms-old,bbockelm/glideinWMS,holzman/glideinwms-old,bbockelm/glideinWMS,holzman/glideinwms-old,bbockelm/glideinWMS,bbockelm/glideinWMS | #!/bin/env python
#
# glidein_top
#
# Execute a top command in the same glidein as the user job
#
# Usage:
# glidein_top.py <cluster>.<process> [-name <schedd_name>] [-pool <pool_name> ] [-timeout <nr secs>
#
import os
import stat
import sys
sys.path.append("lib")
sys.path.append("../lib")
import glideinMonitor
def createTopMonitorFile(monitor_file_name,monitor_control_relname,
argv,condor_status):
fd=open(monitor_file_name,"w")
try:
fd.write("#!/bin/sh\n")
fd.write("top -b -n 1\n")
fd.write("echo Done > %s\n"%monitor_control_relname)
finally:
fd.close()
os.chmod(monitor_file_name,stat.S_IRWXU)
args=glideinMonitor.parseArgs(sys.argv[1:])
if len(args['argv'])!=0:
raise RuntimeError, "Unexpected parameters starting with %s found!"%args['argv'][0]
glideinMonitor.monitor(args['jid'],args['schedd_name'],args['pool_name'],
args['timeout'],
createTopMonitorFile,args['argv'])
Change rel paths into abspaths and use helper module | #!/bin/env python
#
# glidein_top.py
#
# Description:
# Execute a top command on a condor job
#
# Usage:
# glidein_top.py <cluster>.<process> [-name <schedd_name>] [-pool <pool_name> ] [-timeout <nr secs>]
#
# Author:
# Igor Sfiligoi (May 2007)
#
# License:
# Fermitools
#
import sys,os.path
sys.path.append(os.path.join(sys.path[0],"lib"))
sys.path.append(os.path.join(sys.path[0],"../lib"))
import glideinCmd
def argv_top(argv):
if len(argv)!=0:
raise RuntimeError, "Unexpected parameters starting with %s found!"%argv[0]
return ['top', '-b', '-n', '1']
glideinCmd.exe_cmd(argv_top)
| <commit_before>#!/bin/env python
#
# glidein_top
#
# Execute a top command in the same glidein as the user job
#
# Usage:
# glidein_top.py <cluster>.<process> [-name <schedd_name>] [-pool <pool_name> ] [-timeout <nr secs>
#
import os
import stat
import sys
sys.path.append("lib")
sys.path.append("../lib")
import glideinMonitor
def createTopMonitorFile(monitor_file_name,monitor_control_relname,
argv,condor_status):
fd=open(monitor_file_name,"w")
try:
fd.write("#!/bin/sh\n")
fd.write("top -b -n 1\n")
fd.write("echo Done > %s\n"%monitor_control_relname)
finally:
fd.close()
os.chmod(monitor_file_name,stat.S_IRWXU)
args=glideinMonitor.parseArgs(sys.argv[1:])
if len(args['argv'])!=0:
raise RuntimeError, "Unexpected parameters starting with %s found!"%args['argv'][0]
glideinMonitor.monitor(args['jid'],args['schedd_name'],args['pool_name'],
args['timeout'],
createTopMonitorFile,args['argv'])
<commit_msg>Change rel paths into abspaths and use helper module<commit_after> | #!/bin/env python
#
# glidein_top.py
#
# Description:
# Execute a top command on a condor job
#
# Usage:
# glidein_top.py <cluster>.<process> [-name <schedd_name>] [-pool <pool_name> ] [-timeout <nr secs>]
#
# Author:
# Igor Sfiligoi (May 2007)
#
# License:
# Fermitools
#
import sys,os.path
sys.path.append(os.path.join(sys.path[0],"lib"))
sys.path.append(os.path.join(sys.path[0],"../lib"))
import glideinCmd
def argv_top(argv):
if len(argv)!=0:
raise RuntimeError, "Unexpected parameters starting with %s found!"%argv[0]
return ['top', '-b', '-n', '1']
glideinCmd.exe_cmd(argv_top)
| #!/bin/env python
#
# glidein_top
#
# Execute a top command in the same glidein as the user job
#
# Usage:
# glidein_top.py <cluster>.<process> [-name <schedd_name>] [-pool <pool_name> ] [-timeout <nr secs>
#
import os
import stat
import sys
sys.path.append("lib")
sys.path.append("../lib")
import glideinMonitor
def createTopMonitorFile(monitor_file_name,monitor_control_relname,
argv,condor_status):
fd=open(monitor_file_name,"w")
try:
fd.write("#!/bin/sh\n")
fd.write("top -b -n 1\n")
fd.write("echo Done > %s\n"%monitor_control_relname)
finally:
fd.close()
os.chmod(monitor_file_name,stat.S_IRWXU)
args=glideinMonitor.parseArgs(sys.argv[1:])
if len(args['argv'])!=0:
raise RuntimeError, "Unexpected parameters starting with %s found!"%args['argv'][0]
glideinMonitor.monitor(args['jid'],args['schedd_name'],args['pool_name'],
args['timeout'],
createTopMonitorFile,args['argv'])
Change rel paths into abspaths and use helper module#!/bin/env python
#
# glidein_top.py
#
# Description:
# Execute a top command on a condor job
#
# Usage:
# glidein_top.py <cluster>.<process> [-name <schedd_name>] [-pool <pool_name> ] [-timeout <nr secs>]
#
# Author:
# Igor Sfiligoi (May 2007)
#
# License:
# Fermitools
#
import sys,os.path
sys.path.append(os.path.join(sys.path[0],"lib"))
sys.path.append(os.path.join(sys.path[0],"../lib"))
import glideinCmd
def argv_top(argv):
if len(argv)!=0:
raise RuntimeError, "Unexpected parameters starting with %s found!"%argv[0]
return ['top', '-b', '-n', '1']
glideinCmd.exe_cmd(argv_top)
| <commit_before>#!/bin/env python
#
# glidein_top
#
# Execute a top command in the same glidein as the user job
#
# Usage:
# glidein_top.py <cluster>.<process> [-name <schedd_name>] [-pool <pool_name> ] [-timeout <nr secs>
#
import os
import stat
import sys
sys.path.append("lib")
sys.path.append("../lib")
import glideinMonitor
def createTopMonitorFile(monitor_file_name,monitor_control_relname,
argv,condor_status):
fd=open(monitor_file_name,"w")
try:
fd.write("#!/bin/sh\n")
fd.write("top -b -n 1\n")
fd.write("echo Done > %s\n"%monitor_control_relname)
finally:
fd.close()
os.chmod(monitor_file_name,stat.S_IRWXU)
args=glideinMonitor.parseArgs(sys.argv[1:])
if len(args['argv'])!=0:
raise RuntimeError, "Unexpected parameters starting with %s found!"%args['argv'][0]
glideinMonitor.monitor(args['jid'],args['schedd_name'],args['pool_name'],
args['timeout'],
createTopMonitorFile,args['argv'])
<commit_msg>Change rel paths into abspaths and use helper module<commit_after>#!/bin/env python
#
# glidein_top.py
#
# Description:
# Execute a top command on a condor job
#
# Usage:
# glidein_top.py <cluster>.<process> [-name <schedd_name>] [-pool <pool_name> ] [-timeout <nr secs>]
#
# Author:
# Igor Sfiligoi (May 2007)
#
# License:
# Fermitools
#
import sys,os.path
sys.path.append(os.path.join(sys.path[0],"lib"))
sys.path.append(os.path.join(sys.path[0],"../lib"))
import glideinCmd
def argv_top(argv):
if len(argv)!=0:
raise RuntimeError, "Unexpected parameters starting with %s found!"%argv[0]
return ['top', '-b', '-n', '1']
glideinCmd.exe_cmd(argv_top)
|
7a4398f6e60b53626a2586cc4df3b7f5fb2a6aff | mysite/profile/management/commands/profile_ten_minutely_tasks.py | mysite/profile/management/commands/profile_ten_minutely_tasks.py | # This file is part of OpenHatch.
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import logging
from django.core.management.base import BaseCommand
import staticgenerator
class Command(BaseCommand):
help = "Run this once every 10 minutes for the OpenHatch profile app."
def handle(self, *args, **options):
# Every 10 minutes, refresh /+projects/
root_logger = logging.getLogger('')
root_logger.setLevel(logging.WARN)
staticgenerator.quick_publish('/+projects/')
| # This file is part of OpenHatch.
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import logging
from django.core.management.base import BaseCommand
import staticgenerator
class Command(BaseCommand):
help = "Run this once every 10 minutes for the OpenHatch profile app."
def handle(self, *args, **options):
# Every 10 minutes, refresh /projects/
root_logger = logging.getLogger('')
root_logger.setLevel(logging.WARN)
staticgenerator.quick_publish('/projects/')
| Use /projects/ not /+projects/ when caching | Use /projects/ not /+projects/ when caching
This prevents staticgenerator from sending out an annoying email
every 10 minutes.
| Python | agpl-3.0 | vipul-sharma20/oh-mainline,SnappleCap/oh-mainline,waseem18/oh-mainline,willingc/oh-mainline,Changaco/oh-mainline,nirmeshk/oh-mainline,heeraj123/oh-mainline,waseem18/oh-mainline,sudheesh001/oh-mainline,waseem18/oh-mainline,nirmeshk/oh-mainline,vipul-sharma20/oh-mainline,vipul-sharma20/oh-mainline,nirmeshk/oh-mainline,moijes12/oh-mainline,sudheesh001/oh-mainline,heeraj123/oh-mainline,ojengwa/oh-mainline,waseem18/oh-mainline,Changaco/oh-mainline,onceuponatimeforever/oh-mainline,ojengwa/oh-mainline,ojengwa/oh-mainline,heeraj123/oh-mainline,Changaco/oh-mainline,campbe13/openhatch,campbe13/openhatch,vipul-sharma20/oh-mainline,eeshangarg/oh-mainline,moijes12/oh-mainline,SnappleCap/oh-mainline,SnappleCap/oh-mainline,campbe13/openhatch,ehashman/oh-mainline,moijes12/oh-mainline,openhatch/oh-mainline,Changaco/oh-mainline,openhatch/oh-mainline,ehashman/oh-mainline,nirmeshk/oh-mainline,willingc/oh-mainline,sudheesh001/oh-mainline,openhatch/oh-mainline,ehashman/oh-mainline,onceuponatimeforever/oh-mainline,eeshangarg/oh-mainline,ojengwa/oh-mainline,campbe13/openhatch,onceuponatimeforever/oh-mainline,openhatch/oh-mainline,eeshangarg/oh-mainline,onceuponatimeforever/oh-mainline,heeraj123/oh-mainline,nirmeshk/oh-mainline,SnappleCap/oh-mainline,willingc/oh-mainline,ehashman/oh-mainline,sudheesh001/oh-mainline,waseem18/oh-mainline,eeshangarg/oh-mainline,vipul-sharma20/oh-mainline,ehashman/oh-mainline,heeraj123/oh-mainline,onceuponatimeforever/oh-mainline,ojengwa/oh-mainline,Changaco/oh-mainline,moijes12/oh-mainline,willingc/oh-mainline,sudheesh001/oh-mainline,SnappleCap/oh-mainline,willingc/oh-mainline,openhatch/oh-mainline,moijes12/oh-mainline,eeshangarg/oh-mainline,campbe13/openhatch | # This file is part of OpenHatch.
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import logging
from django.core.management.base import BaseCommand
import staticgenerator
class Command(BaseCommand):
help = "Run this once every 10 minutes for the OpenHatch profile app."
def handle(self, *args, **options):
# Every 10 minutes, refresh /+projects/
root_logger = logging.getLogger('')
root_logger.setLevel(logging.WARN)
staticgenerator.quick_publish('/+projects/')
Use /projects/ not /+projects/ when caching
This prevents staticgenerator from sending out an annoying email
every 10 minutes. | # This file is part of OpenHatch.
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import logging
from django.core.management.base import BaseCommand
import staticgenerator
class Command(BaseCommand):
help = "Run this once every 10 minutes for the OpenHatch profile app."
def handle(self, *args, **options):
# Every 10 minutes, refresh /projects/
root_logger = logging.getLogger('')
root_logger.setLevel(logging.WARN)
staticgenerator.quick_publish('/projects/')
| <commit_before># This file is part of OpenHatch.
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import logging
from django.core.management.base import BaseCommand
import staticgenerator
class Command(BaseCommand):
help = "Run this once every 10 minutes for the OpenHatch profile app."
def handle(self, *args, **options):
# Every 10 minutes, refresh /+projects/
root_logger = logging.getLogger('')
root_logger.setLevel(logging.WARN)
staticgenerator.quick_publish('/+projects/')
<commit_msg>Use /projects/ not /+projects/ when caching
This prevents staticgenerator from sending out an annoying email
every 10 minutes.<commit_after> | # This file is part of OpenHatch.
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import logging
from django.core.management.base import BaseCommand
import staticgenerator
class Command(BaseCommand):
help = "Run this once every 10 minutes for the OpenHatch profile app."
def handle(self, *args, **options):
# Every 10 minutes, refresh /projects/
root_logger = logging.getLogger('')
root_logger.setLevel(logging.WARN)
staticgenerator.quick_publish('/projects/')
| # This file is part of OpenHatch.
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import logging
from django.core.management.base import BaseCommand
import staticgenerator
class Command(BaseCommand):
help = "Run this once every 10 minutes for the OpenHatch profile app."
def handle(self, *args, **options):
# Every 10 minutes, refresh /+projects/
root_logger = logging.getLogger('')
root_logger.setLevel(logging.WARN)
staticgenerator.quick_publish('/+projects/')
Use /projects/ not /+projects/ when caching
This prevents staticgenerator from sending out an annoying email
every 10 minutes.# This file is part of OpenHatch.
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import logging
from django.core.management.base import BaseCommand
import staticgenerator
class Command(BaseCommand):
help = "Run this once every 10 minutes for the OpenHatch profile app."
def handle(self, *args, **options):
# Every 10 minutes, refresh /projects/
root_logger = logging.getLogger('')
root_logger.setLevel(logging.WARN)
staticgenerator.quick_publish('/projects/')
| <commit_before># This file is part of OpenHatch.
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import logging
from django.core.management.base import BaseCommand
import staticgenerator
class Command(BaseCommand):
help = "Run this once every 10 minutes for the OpenHatch profile app."
def handle(self, *args, **options):
# Every 10 minutes, refresh /+projects/
root_logger = logging.getLogger('')
root_logger.setLevel(logging.WARN)
staticgenerator.quick_publish('/+projects/')
<commit_msg>Use /projects/ not /+projects/ when caching
This prevents staticgenerator from sending out an annoying email
every 10 minutes.<commit_after># This file is part of OpenHatch.
# Copyright (C) 2010 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import logging
from django.core.management.base import BaseCommand
import staticgenerator
class Command(BaseCommand):
help = "Run this once every 10 minutes for the OpenHatch profile app."
def handle(self, *args, **options):
# Every 10 minutes, refresh /projects/
root_logger = logging.getLogger('')
root_logger.setLevel(logging.WARN)
staticgenerator.quick_publish('/projects/')
|
85d800d9979fa122e0888af48c2e6a697f9da458 | test/test_sc2replay.py | test/test_sc2replay.py | #!/usr/bin/env python
# coding: utf-8
import os
import unittest
from adjutant import SC2Replay
TEST_DIR = os.path.realpath(os.path.dirname(__file__)) + '/'
class TestSC2Replay(unittest.TestCase):
def setUp(self):
self.replay = SC2Replay(TEST_DIR + 'test.SC2Replay')
def test_init(self):
self.assertEqual(self.replay.map, 'Toxic Slums')
self.assertEqual(self.replay.duration, '26m 17s')
self.assertEqual(self.replay.version, '1.0.2.16223')
self.assertEqual(len(self.replay.players), 8)
| #!/usr/bin/env python
# coding: utf-8
import os
import unittest
from adjutant import SC2Replay
TEST_DIR = os.path.realpath(os.path.dirname(__file__)) + '/'
class TestSC2Replay(unittest.TestCase):
def setUp(self):
self.replay = SC2Replay(TEST_DIR + 'test.SC2Replay')
def test_init(self):
self.assertEqual(self.replay.map, 'Toxic Slums')
self.assertEqual(self.replay.duration, '26m 17s')
self.assertEqual(self.replay.version, '1.0.2.16223')
self.assertEqual(len(self.replay.players), 8)
def test_init_with_a_file_arg(self):
self.replay = SC2Replay(open(TEST_DIR + 'test.SC2Replay', 'rb'))
self.assertEqual(self.replay.map, 'Toxic Slums')
| Add a small test for init with a file arg. | Add a small test for init with a file arg.
| Python | bsd-2-clause | eagleflo/adjutant | #!/usr/bin/env python
# coding: utf-8
import os
import unittest
from adjutant import SC2Replay
TEST_DIR = os.path.realpath(os.path.dirname(__file__)) + '/'
class TestSC2Replay(unittest.TestCase):
def setUp(self):
self.replay = SC2Replay(TEST_DIR + 'test.SC2Replay')
def test_init(self):
self.assertEqual(self.replay.map, 'Toxic Slums')
self.assertEqual(self.replay.duration, '26m 17s')
self.assertEqual(self.replay.version, '1.0.2.16223')
self.assertEqual(len(self.replay.players), 8)
Add a small test for init with a file arg. | #!/usr/bin/env python
# coding: utf-8
import os
import unittest
from adjutant import SC2Replay
TEST_DIR = os.path.realpath(os.path.dirname(__file__)) + '/'
class TestSC2Replay(unittest.TestCase):
def setUp(self):
self.replay = SC2Replay(TEST_DIR + 'test.SC2Replay')
def test_init(self):
self.assertEqual(self.replay.map, 'Toxic Slums')
self.assertEqual(self.replay.duration, '26m 17s')
self.assertEqual(self.replay.version, '1.0.2.16223')
self.assertEqual(len(self.replay.players), 8)
def test_init_with_a_file_arg(self):
self.replay = SC2Replay(open(TEST_DIR + 'test.SC2Replay', 'rb'))
self.assertEqual(self.replay.map, 'Toxic Slums')
| <commit_before>#!/usr/bin/env python
# coding: utf-8
import os
import unittest
from adjutant import SC2Replay
TEST_DIR = os.path.realpath(os.path.dirname(__file__)) + '/'
class TestSC2Replay(unittest.TestCase):
def setUp(self):
self.replay = SC2Replay(TEST_DIR + 'test.SC2Replay')
def test_init(self):
self.assertEqual(self.replay.map, 'Toxic Slums')
self.assertEqual(self.replay.duration, '26m 17s')
self.assertEqual(self.replay.version, '1.0.2.16223')
self.assertEqual(len(self.replay.players), 8)
<commit_msg>Add a small test for init with a file arg.<commit_after> | #!/usr/bin/env python
# coding: utf-8
import os
import unittest
from adjutant import SC2Replay
TEST_DIR = os.path.realpath(os.path.dirname(__file__)) + '/'
class TestSC2Replay(unittest.TestCase):
def setUp(self):
self.replay = SC2Replay(TEST_DIR + 'test.SC2Replay')
def test_init(self):
self.assertEqual(self.replay.map, 'Toxic Slums')
self.assertEqual(self.replay.duration, '26m 17s')
self.assertEqual(self.replay.version, '1.0.2.16223')
self.assertEqual(len(self.replay.players), 8)
def test_init_with_a_file_arg(self):
self.replay = SC2Replay(open(TEST_DIR + 'test.SC2Replay', 'rb'))
self.assertEqual(self.replay.map, 'Toxic Slums')
| #!/usr/bin/env python
# coding: utf-8
import os
import unittest
from adjutant import SC2Replay
TEST_DIR = os.path.realpath(os.path.dirname(__file__)) + '/'
class TestSC2Replay(unittest.TestCase):
def setUp(self):
self.replay = SC2Replay(TEST_DIR + 'test.SC2Replay')
def test_init(self):
self.assertEqual(self.replay.map, 'Toxic Slums')
self.assertEqual(self.replay.duration, '26m 17s')
self.assertEqual(self.replay.version, '1.0.2.16223')
self.assertEqual(len(self.replay.players), 8)
Add a small test for init with a file arg.#!/usr/bin/env python
# coding: utf-8
import os
import unittest
from adjutant import SC2Replay
TEST_DIR = os.path.realpath(os.path.dirname(__file__)) + '/'
class TestSC2Replay(unittest.TestCase):
def setUp(self):
self.replay = SC2Replay(TEST_DIR + 'test.SC2Replay')
def test_init(self):
self.assertEqual(self.replay.map, 'Toxic Slums')
self.assertEqual(self.replay.duration, '26m 17s')
self.assertEqual(self.replay.version, '1.0.2.16223')
self.assertEqual(len(self.replay.players), 8)
def test_init_with_a_file_arg(self):
self.replay = SC2Replay(open(TEST_DIR + 'test.SC2Replay', 'rb'))
self.assertEqual(self.replay.map, 'Toxic Slums')
| <commit_before>#!/usr/bin/env python
# coding: utf-8
import os
import unittest
from adjutant import SC2Replay
TEST_DIR = os.path.realpath(os.path.dirname(__file__)) + '/'
class TestSC2Replay(unittest.TestCase):
def setUp(self):
self.replay = SC2Replay(TEST_DIR + 'test.SC2Replay')
def test_init(self):
self.assertEqual(self.replay.map, 'Toxic Slums')
self.assertEqual(self.replay.duration, '26m 17s')
self.assertEqual(self.replay.version, '1.0.2.16223')
self.assertEqual(len(self.replay.players), 8)
<commit_msg>Add a small test for init with a file arg.<commit_after>#!/usr/bin/env python
# coding: utf-8
import os
import unittest
from adjutant import SC2Replay
TEST_DIR = os.path.realpath(os.path.dirname(__file__)) + '/'
class TestSC2Replay(unittest.TestCase):
def setUp(self):
self.replay = SC2Replay(TEST_DIR + 'test.SC2Replay')
def test_init(self):
self.assertEqual(self.replay.map, 'Toxic Slums')
self.assertEqual(self.replay.duration, '26m 17s')
self.assertEqual(self.replay.version, '1.0.2.16223')
self.assertEqual(len(self.replay.players), 8)
def test_init_with_a_file_arg(self):
self.replay = SC2Replay(open(TEST_DIR + 'test.SC2Replay', 'rb'))
self.assertEqual(self.replay.map, 'Toxic Slums')
|
295e356af1e4422fd8e2af9a44b46f5976a5ec39 | tools/test_sneeze.py | tools/test_sneeze.py |
import os
from tempfile import mkdtemp
from shutil import rmtree
from nipy.testing import *
from sneeze import find_pkg, run_nose
import_strings = ["from nipype.interfaces.afni import To3d",
"from nipype.interfaces import afni",
"import nipype.interfaces.afni"]
def test_from_namespace():
dname = mkdtemp()
fname = os.path.join(dname, 'test_afni.py')
fp = open(fname, 'w')
fp.write('from nipype.interfaces.afni import To3d')
fp.close()
cover_pkg, module = find_pkg(fname)
cmd = run_nose(cover_pkg, fname, dry_run=True)
cmdlst = cmd.split()
cmd = ' '.join(cmdlst[:4]) # strip off temporary directory path
#print cmd
assert_equal(cmd, 'nosetests -sv --with-coverage --cover-package=nipype.interfaces.afni')
if os.path.exists(dname):
rmtree(dname)
|
import os
from tempfile import mkdtemp
from shutil import rmtree
from nipy.testing import *
from sneeze import find_pkg, run_nose
import_strings = ["from nipype.interfaces.afni import To3d, ThreeDRefit",
"from nipype.interfaces import afni",
"import nipype.interfaces.afni",
"from nipype.interfaces import afni as af"]
def test_imports():
dname = mkdtemp()
fname = os.path.join(dname, 'test_afni.py')
for impt in import_strings:
fp = open(fname, 'w')
fp.write(impt)
fp.close()
cover_pkg, module = find_pkg(fname)
cmd = run_nose(cover_pkg, fname, dry_run=True)
cmdlst = cmd.split()
cmd = ' '.join(cmdlst[:4]) # strip off temporary directory path
yield assert_equal, cmd, \
'nosetests -sv --with-coverage --cover-package=nipype.interfaces.afni'
if os.path.exists(dname):
rmtree(dname)
| Add more tests for sneeze. | Add more tests for sneeze. | Python | bsd-3-clause | nipy/nipy-labs,alexis-roche/niseg,nipy/nireg,alexis-roche/nipy,alexis-roche/nipy,alexis-roche/nireg,arokem/nipy,bthirion/nipy,alexis-roche/register,nipy/nireg,bthirion/nipy,alexis-roche/nipy,bthirion/nipy,alexis-roche/nipy,arokem/nipy,alexis-roche/niseg,arokem/nipy,bthirion/nipy,arokem/nipy,alexis-roche/register,alexis-roche/register,nipy/nipy-labs,alexis-roche/nireg |
import os
from tempfile import mkdtemp
from shutil import rmtree
from nipy.testing import *
from sneeze import find_pkg, run_nose
import_strings = ["from nipype.interfaces.afni import To3d",
"from nipype.interfaces import afni",
"import nipype.interfaces.afni"]
def test_from_namespace():
dname = mkdtemp()
fname = os.path.join(dname, 'test_afni.py')
fp = open(fname, 'w')
fp.write('from nipype.interfaces.afni import To3d')
fp.close()
cover_pkg, module = find_pkg(fname)
cmd = run_nose(cover_pkg, fname, dry_run=True)
cmdlst = cmd.split()
cmd = ' '.join(cmdlst[:4]) # strip off temporary directory path
#print cmd
assert_equal(cmd, 'nosetests -sv --with-coverage --cover-package=nipype.interfaces.afni')
if os.path.exists(dname):
rmtree(dname)
Add more tests for sneeze. |
import os
from tempfile import mkdtemp
from shutil import rmtree
from nipy.testing import *
from sneeze import find_pkg, run_nose
import_strings = ["from nipype.interfaces.afni import To3d, ThreeDRefit",
"from nipype.interfaces import afni",
"import nipype.interfaces.afni",
"from nipype.interfaces import afni as af"]
def test_imports():
dname = mkdtemp()
fname = os.path.join(dname, 'test_afni.py')
for impt in import_strings:
fp = open(fname, 'w')
fp.write(impt)
fp.close()
cover_pkg, module = find_pkg(fname)
cmd = run_nose(cover_pkg, fname, dry_run=True)
cmdlst = cmd.split()
cmd = ' '.join(cmdlst[:4]) # strip off temporary directory path
yield assert_equal, cmd, \
'nosetests -sv --with-coverage --cover-package=nipype.interfaces.afni'
if os.path.exists(dname):
rmtree(dname)
| <commit_before>
import os
from tempfile import mkdtemp
from shutil import rmtree
from nipy.testing import *
from sneeze import find_pkg, run_nose
import_strings = ["from nipype.interfaces.afni import To3d",
"from nipype.interfaces import afni",
"import nipype.interfaces.afni"]
def test_from_namespace():
dname = mkdtemp()
fname = os.path.join(dname, 'test_afni.py')
fp = open(fname, 'w')
fp.write('from nipype.interfaces.afni import To3d')
fp.close()
cover_pkg, module = find_pkg(fname)
cmd = run_nose(cover_pkg, fname, dry_run=True)
cmdlst = cmd.split()
cmd = ' '.join(cmdlst[:4]) # strip off temporary directory path
#print cmd
assert_equal(cmd, 'nosetests -sv --with-coverage --cover-package=nipype.interfaces.afni')
if os.path.exists(dname):
rmtree(dname)
<commit_msg>Add more tests for sneeze.<commit_after> |
import os
from tempfile import mkdtemp
from shutil import rmtree
from nipy.testing import *
from sneeze import find_pkg, run_nose
import_strings = ["from nipype.interfaces.afni import To3d, ThreeDRefit",
"from nipype.interfaces import afni",
"import nipype.interfaces.afni",
"from nipype.interfaces import afni as af"]
def test_imports():
dname = mkdtemp()
fname = os.path.join(dname, 'test_afni.py')
for impt in import_strings:
fp = open(fname, 'w')
fp.write(impt)
fp.close()
cover_pkg, module = find_pkg(fname)
cmd = run_nose(cover_pkg, fname, dry_run=True)
cmdlst = cmd.split()
cmd = ' '.join(cmdlst[:4]) # strip off temporary directory path
yield assert_equal, cmd, \
'nosetests -sv --with-coverage --cover-package=nipype.interfaces.afni'
if os.path.exists(dname):
rmtree(dname)
|
import os
from tempfile import mkdtemp
from shutil import rmtree
from nipy.testing import *
from sneeze import find_pkg, run_nose
import_strings = ["from nipype.interfaces.afni import To3d",
"from nipype.interfaces import afni",
"import nipype.interfaces.afni"]
def test_from_namespace():
dname = mkdtemp()
fname = os.path.join(dname, 'test_afni.py')
fp = open(fname, 'w')
fp.write('from nipype.interfaces.afni import To3d')
fp.close()
cover_pkg, module = find_pkg(fname)
cmd = run_nose(cover_pkg, fname, dry_run=True)
cmdlst = cmd.split()
cmd = ' '.join(cmdlst[:4]) # strip off temporary directory path
#print cmd
assert_equal(cmd, 'nosetests -sv --with-coverage --cover-package=nipype.interfaces.afni')
if os.path.exists(dname):
rmtree(dname)
Add more tests for sneeze.
import os
from tempfile import mkdtemp
from shutil import rmtree
from nipy.testing import *
from sneeze import find_pkg, run_nose
import_strings = ["from nipype.interfaces.afni import To3d, ThreeDRefit",
"from nipype.interfaces import afni",
"import nipype.interfaces.afni",
"from nipype.interfaces import afni as af"]
def test_imports():
dname = mkdtemp()
fname = os.path.join(dname, 'test_afni.py')
for impt in import_strings:
fp = open(fname, 'w')
fp.write(impt)
fp.close()
cover_pkg, module = find_pkg(fname)
cmd = run_nose(cover_pkg, fname, dry_run=True)
cmdlst = cmd.split()
cmd = ' '.join(cmdlst[:4]) # strip off temporary directory path
yield assert_equal, cmd, \
'nosetests -sv --with-coverage --cover-package=nipype.interfaces.afni'
if os.path.exists(dname):
rmtree(dname)
| <commit_before>
import os
from tempfile import mkdtemp
from shutil import rmtree
from nipy.testing import *
from sneeze import find_pkg, run_nose
import_strings = ["from nipype.interfaces.afni import To3d",
"from nipype.interfaces import afni",
"import nipype.interfaces.afni"]
def test_from_namespace():
dname = mkdtemp()
fname = os.path.join(dname, 'test_afni.py')
fp = open(fname, 'w')
fp.write('from nipype.interfaces.afni import To3d')
fp.close()
cover_pkg, module = find_pkg(fname)
cmd = run_nose(cover_pkg, fname, dry_run=True)
cmdlst = cmd.split()
cmd = ' '.join(cmdlst[:4]) # strip off temporary directory path
#print cmd
assert_equal(cmd, 'nosetests -sv --with-coverage --cover-package=nipype.interfaces.afni')
if os.path.exists(dname):
rmtree(dname)
<commit_msg>Add more tests for sneeze.<commit_after>
import os
from tempfile import mkdtemp
from shutil import rmtree
from nipy.testing import *
from sneeze import find_pkg, run_nose
import_strings = ["from nipype.interfaces.afni import To3d, ThreeDRefit",
"from nipype.interfaces import afni",
"import nipype.interfaces.afni",
"from nipype.interfaces import afni as af"]
def test_imports():
dname = mkdtemp()
fname = os.path.join(dname, 'test_afni.py')
for impt in import_strings:
fp = open(fname, 'w')
fp.write(impt)
fp.close()
cover_pkg, module = find_pkg(fname)
cmd = run_nose(cover_pkg, fname, dry_run=True)
cmdlst = cmd.split()
cmd = ' '.join(cmdlst[:4]) # strip off temporary directory path
yield assert_equal, cmd, \
'nosetests -sv --with-coverage --cover-package=nipype.interfaces.afni'
if os.path.exists(dname):
rmtree(dname)
|
1f9486cff230beae00e5417d6ad2b1ba28526339 | pson/pson.py | pson/pson.py | import json
from pprint import pprint
def pathparser(path, separator="."):
return path.split(separator)
def pathquery(pson, path, separator=".", missing=None, iterate=True):
if isinstance(path,str) or isinstance(path, unicode):
path = pathparser(path, separator=separator)
counter = 0
for token in path:
if type(pson) == dict and pson.has_key(token): # step one level deeper into the pson with our token
pson = pson[token]
elif type(pson) == list:
# if we hit an array see if the token is a number else assume we
# want the rest of the path applied to every element in the array
try:
if int(token)<len(pson):
pson = pson[int(token)]
else: #handle a number longer than list len
return missing
except ValueError:
if iterate:
return [pathquery(x, path[counter:]) for x in pson]
return missing
else:
return missing
counter += 1
return pson
| import json
from pprint import pprint
def pathparser(path, separator="."):
return path.split(separator)
def pathquery(pson, path, separator=".", missing=None, iterate=True):
if isinstance(path,str) or isinstance(path, unicode):
path = pathparser(path, separator=separator)
counter = 0
for token in path:
# step one level deeper into the pson with our token
if type(pson) == dict and pson.has_key(token):
pson = pson[token]
elif type(pson) == list:
# if we hit an array see if the token is a number else assume we
# want the rest of the path applied to every element in the array
try:
if int(token)<len(pson):
pson = pson[int(token)]
else: #handle a number longer than list len
return missing
except ValueError:
if iterate:
return [pathquery(x, path[counter:]) for x in pson]
return missing
else:
return missing
counter += 1
return pson
| Add consistent 4 space indentation | Add consistent 4 space indentation
Applied 4 space indentation all along the main pson.py file.
| Python | mit | imranghory/pson | import json
from pprint import pprint
def pathparser(path, separator="."):
return path.split(separator)
def pathquery(pson, path, separator=".", missing=None, iterate=True):
if isinstance(path,str) or isinstance(path, unicode):
path = pathparser(path, separator=separator)
counter = 0
for token in path:
if type(pson) == dict and pson.has_key(token): # step one level deeper into the pson with our token
pson = pson[token]
elif type(pson) == list:
# if we hit an array see if the token is a number else assume we
# want the rest of the path applied to every element in the array
try:
if int(token)<len(pson):
pson = pson[int(token)]
else: #handle a number longer than list len
return missing
except ValueError:
if iterate:
return [pathquery(x, path[counter:]) for x in pson]
return missing
else:
return missing
counter += 1
return pson
Add consistent 4 space indentation
Applied 4 space indentation all along the main pson.py file. | import json
from pprint import pprint
def pathparser(path, separator="."):
return path.split(separator)
def pathquery(pson, path, separator=".", missing=None, iterate=True):
if isinstance(path,str) or isinstance(path, unicode):
path = pathparser(path, separator=separator)
counter = 0
for token in path:
# step one level deeper into the pson with our token
if type(pson) == dict and pson.has_key(token):
pson = pson[token]
elif type(pson) == list:
# if we hit an array see if the token is a number else assume we
# want the rest of the path applied to every element in the array
try:
if int(token)<len(pson):
pson = pson[int(token)]
else: #handle a number longer than list len
return missing
except ValueError:
if iterate:
return [pathquery(x, path[counter:]) for x in pson]
return missing
else:
return missing
counter += 1
return pson
| <commit_before>import json
from pprint import pprint
def pathparser(path, separator="."):
return path.split(separator)
def pathquery(pson, path, separator=".", missing=None, iterate=True):
if isinstance(path,str) or isinstance(path, unicode):
path = pathparser(path, separator=separator)
counter = 0
for token in path:
if type(pson) == dict and pson.has_key(token): # step one level deeper into the pson with our token
pson = pson[token]
elif type(pson) == list:
# if we hit an array see if the token is a number else assume we
# want the rest of the path applied to every element in the array
try:
if int(token)<len(pson):
pson = pson[int(token)]
else: #handle a number longer than list len
return missing
except ValueError:
if iterate:
return [pathquery(x, path[counter:]) for x in pson]
return missing
else:
return missing
counter += 1
return pson
<commit_msg>Add consistent 4 space indentation
Applied 4 space indentation all along the main pson.py file.<commit_after> | import json
from pprint import pprint
def pathparser(path, separator="."):
return path.split(separator)
def pathquery(pson, path, separator=".", missing=None, iterate=True):
if isinstance(path,str) or isinstance(path, unicode):
path = pathparser(path, separator=separator)
counter = 0
for token in path:
# step one level deeper into the pson with our token
if type(pson) == dict and pson.has_key(token):
pson = pson[token]
elif type(pson) == list:
# if we hit an array see if the token is a number else assume we
# want the rest of the path applied to every element in the array
try:
if int(token)<len(pson):
pson = pson[int(token)]
else: #handle a number longer than list len
return missing
except ValueError:
if iterate:
return [pathquery(x, path[counter:]) for x in pson]
return missing
else:
return missing
counter += 1
return pson
| import json
from pprint import pprint
def pathparser(path, separator="."):
return path.split(separator)
def pathquery(pson, path, separator=".", missing=None, iterate=True):
if isinstance(path,str) or isinstance(path, unicode):
path = pathparser(path, separator=separator)
counter = 0
for token in path:
if type(pson) == dict and pson.has_key(token): # step one level deeper into the pson with our token
pson = pson[token]
elif type(pson) == list:
# if we hit an array see if the token is a number else assume we
# want the rest of the path applied to every element in the array
try:
if int(token)<len(pson):
pson = pson[int(token)]
else: #handle a number longer than list len
return missing
except ValueError:
if iterate:
return [pathquery(x, path[counter:]) for x in pson]
return missing
else:
return missing
counter += 1
return pson
Add consistent 4 space indentation
Applied 4 space indentation all along the main pson.py file.import json
from pprint import pprint
def pathparser(path, separator="."):
return path.split(separator)
def pathquery(pson, path, separator=".", missing=None, iterate=True):
if isinstance(path,str) or isinstance(path, unicode):
path = pathparser(path, separator=separator)
counter = 0
for token in path:
# step one level deeper into the pson with our token
if type(pson) == dict and pson.has_key(token):
pson = pson[token]
elif type(pson) == list:
# if we hit an array see if the token is a number else assume we
# want the rest of the path applied to every element in the array
try:
if int(token)<len(pson):
pson = pson[int(token)]
else: #handle a number longer than list len
return missing
except ValueError:
if iterate:
return [pathquery(x, path[counter:]) for x in pson]
return missing
else:
return missing
counter += 1
return pson
| <commit_before>import json
from pprint import pprint
def pathparser(path, separator="."):
return path.split(separator)
def pathquery(pson, path, separator=".", missing=None, iterate=True):
if isinstance(path,str) or isinstance(path, unicode):
path = pathparser(path, separator=separator)
counter = 0
for token in path:
if type(pson) == dict and pson.has_key(token): # step one level deeper into the pson with our token
pson = pson[token]
elif type(pson) == list:
# if we hit an array see if the token is a number else assume we
# want the rest of the path applied to every element in the array
try:
if int(token)<len(pson):
pson = pson[int(token)]
else: #handle a number longer than list len
return missing
except ValueError:
if iterate:
return [pathquery(x, path[counter:]) for x in pson]
return missing
else:
return missing
counter += 1
return pson
<commit_msg>Add consistent 4 space indentation
Applied 4 space indentation all along the main pson.py file.<commit_after>import json
from pprint import pprint
def pathparser(path, separator="."):
return path.split(separator)
def pathquery(pson, path, separator=".", missing=None, iterate=True):
if isinstance(path,str) or isinstance(path, unicode):
path = pathparser(path, separator=separator)
counter = 0
for token in path:
# step one level deeper into the pson with our token
if type(pson) == dict and pson.has_key(token):
pson = pson[token]
elif type(pson) == list:
# if we hit an array see if the token is a number else assume we
# want the rest of the path applied to every element in the array
try:
if int(token)<len(pson):
pson = pson[int(token)]
else: #handle a number longer than list len
return missing
except ValueError:
if iterate:
return [pathquery(x, path[counter:]) for x in pson]
return missing
else:
return missing
counter += 1
return pson
|
da6e9416e12ce71cd3f23ded9bd75dccc62d26fe | fcn/config.py | fcn/config.py | import os.path as osp
def get_data_dir():
this_dir = osp.dirname(osp.abspath(__file__))
return osp.realpath(osp.join(this_dir, '../data'))
def get_logs_dir():
this_dir = osp.dirname(osp.abspath(__file__))
return osp.realpath(osp.join(this_dir, '../logs'))
| import os.path as osp
def get_data_dir():
this_dir = osp.dirname(osp.abspath(__file__))
return osp.realpath(osp.join(this_dir, '_data'))
| Move data directory in package | Move data directory in package
| Python | mit | wkentaro/fcn | import os.path as osp
def get_data_dir():
this_dir = osp.dirname(osp.abspath(__file__))
return osp.realpath(osp.join(this_dir, '../data'))
def get_logs_dir():
this_dir = osp.dirname(osp.abspath(__file__))
return osp.realpath(osp.join(this_dir, '../logs'))
Move data directory in package | import os.path as osp
def get_data_dir():
this_dir = osp.dirname(osp.abspath(__file__))
return osp.realpath(osp.join(this_dir, '_data'))
| <commit_before>import os.path as osp
def get_data_dir():
this_dir = osp.dirname(osp.abspath(__file__))
return osp.realpath(osp.join(this_dir, '../data'))
def get_logs_dir():
this_dir = osp.dirname(osp.abspath(__file__))
return osp.realpath(osp.join(this_dir, '../logs'))
<commit_msg>Move data directory in package<commit_after> | import os.path as osp
def get_data_dir():
this_dir = osp.dirname(osp.abspath(__file__))
return osp.realpath(osp.join(this_dir, '_data'))
| import os.path as osp
def get_data_dir():
this_dir = osp.dirname(osp.abspath(__file__))
return osp.realpath(osp.join(this_dir, '../data'))
def get_logs_dir():
this_dir = osp.dirname(osp.abspath(__file__))
return osp.realpath(osp.join(this_dir, '../logs'))
Move data directory in packageimport os.path as osp
def get_data_dir():
this_dir = osp.dirname(osp.abspath(__file__))
return osp.realpath(osp.join(this_dir, '_data'))
| <commit_before>import os.path as osp
def get_data_dir():
this_dir = osp.dirname(osp.abspath(__file__))
return osp.realpath(osp.join(this_dir, '../data'))
def get_logs_dir():
this_dir = osp.dirname(osp.abspath(__file__))
return osp.realpath(osp.join(this_dir, '../logs'))
<commit_msg>Move data directory in package<commit_after>import os.path as osp
def get_data_dir():
this_dir = osp.dirname(osp.abspath(__file__))
return osp.realpath(osp.join(this_dir, '_data'))
|
945e978d3249762da9a47300cb43d86966de0354 | wanikani2anki.py | wanikani2anki.py | #! /usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
import json
from urllib.request import *
headers = {}
with open('apikey.txt', 'r') as f:
apikey = f.readline().strip()
headers['Authorization'] = 'Token token=' + apikey
rooturl = 'https://www.wanikani.com/api/v2'
request = Request(rooturl + '/user', headers=headers)
response = urlopen(request)
print(response.getcode())
print(response.info())
user = json.loads(response.read().decode())
print(user)
| #! /usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
import json
from urllib.error import *
from urllib.request import *
headers = {}
with open('apikey.txt', 'r') as f:
apikey = f.readline().strip()
headers['Authorization'] = 'Token token=' + apikey
rooturl = 'https://www.wanikani.com/api/v2'
request = Request(rooturl + '/user', headers=headers)
try: response = urlopen(request)
except URLError as error:
print('Error while fetching user data: ' + error.reason)
exit()
# print(response.getcode())
# print(response.info())
user = json.loads(response.read().decode())
# print(user)
print("""Fetching information for
user: {username}
level: {level}
""".format(**user['data']))
| Add error handling for GET request. | Add error handling for GET request.
| Python | mpl-2.0 | holocronweaver/wanikani2anki,holocronweaver/wanikani2anki,holocronweaver/wanikani2anki | #! /usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
import json
from urllib.request import *
headers = {}
with open('apikey.txt', 'r') as f:
apikey = f.readline().strip()
headers['Authorization'] = 'Token token=' + apikey
rooturl = 'https://www.wanikani.com/api/v2'
request = Request(rooturl + '/user', headers=headers)
response = urlopen(request)
print(response.getcode())
print(response.info())
user = json.loads(response.read().decode())
print(user)
Add error handling for GET request. | #! /usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
import json
from urllib.error import *
from urllib.request import *
headers = {}
with open('apikey.txt', 'r') as f:
apikey = f.readline().strip()
headers['Authorization'] = 'Token token=' + apikey
rooturl = 'https://www.wanikani.com/api/v2'
request = Request(rooturl + '/user', headers=headers)
try: response = urlopen(request)
except URLError as error:
print('Error while fetching user data: ' + error.reason)
exit()
# print(response.getcode())
# print(response.info())
user = json.loads(response.read().decode())
# print(user)
print("""Fetching information for
user: {username}
level: {level}
""".format(**user['data']))
| <commit_before>#! /usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
import json
from urllib.request import *
headers = {}
with open('apikey.txt', 'r') as f:
apikey = f.readline().strip()
headers['Authorization'] = 'Token token=' + apikey
rooturl = 'https://www.wanikani.com/api/v2'
request = Request(rooturl + '/user', headers=headers)
response = urlopen(request)
print(response.getcode())
print(response.info())
user = json.loads(response.read().decode())
print(user)
<commit_msg>Add error handling for GET request.<commit_after> | #! /usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
import json
from urllib.error import *
from urllib.request import *
headers = {}
with open('apikey.txt', 'r') as f:
apikey = f.readline().strip()
headers['Authorization'] = 'Token token=' + apikey
rooturl = 'https://www.wanikani.com/api/v2'
request = Request(rooturl + '/user', headers=headers)
try: response = urlopen(request)
except URLError as error:
print('Error while fetching user data: ' + error.reason)
exit()
# print(response.getcode())
# print(response.info())
user = json.loads(response.read().decode())
# print(user)
print("""Fetching information for
user: {username}
level: {level}
""".format(**user['data']))
| #! /usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
import json
from urllib.request import *
headers = {}
with open('apikey.txt', 'r') as f:
apikey = f.readline().strip()
headers['Authorization'] = 'Token token=' + apikey
rooturl = 'https://www.wanikani.com/api/v2'
request = Request(rooturl + '/user', headers=headers)
response = urlopen(request)
print(response.getcode())
print(response.info())
user = json.loads(response.read().decode())
print(user)
Add error handling for GET request.#! /usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
import json
from urllib.error import *
from urllib.request import *
headers = {}
with open('apikey.txt', 'r') as f:
apikey = f.readline().strip()
headers['Authorization'] = 'Token token=' + apikey
rooturl = 'https://www.wanikani.com/api/v2'
request = Request(rooturl + '/user', headers=headers)
try: response = urlopen(request)
except URLError as error:
print('Error while fetching user data: ' + error.reason)
exit()
# print(response.getcode())
# print(response.info())
user = json.loads(response.read().decode())
# print(user)
print("""Fetching information for
user: {username}
level: {level}
""".format(**user['data']))
| <commit_before>#! /usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
import json
from urllib.request import *
headers = {}
with open('apikey.txt', 'r') as f:
apikey = f.readline().strip()
headers['Authorization'] = 'Token token=' + apikey
rooturl = 'https://www.wanikani.com/api/v2'
request = Request(rooturl + '/user', headers=headers)
response = urlopen(request)
print(response.getcode())
print(response.info())
user = json.loads(response.read().decode())
print(user)
<commit_msg>Add error handling for GET request.<commit_after>#! /usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at http://mozilla.org/MPL/2.0/.
import json
from urllib.error import *
from urllib.request import *
headers = {}
with open('apikey.txt', 'r') as f:
apikey = f.readline().strip()
headers['Authorization'] = 'Token token=' + apikey
rooturl = 'https://www.wanikani.com/api/v2'
request = Request(rooturl + '/user', headers=headers)
try: response = urlopen(request)
except URLError as error:
print('Error while fetching user data: ' + error.reason)
exit()
# print(response.getcode())
# print(response.info())
user = json.loads(response.read().decode())
# print(user)
print("""Fetching information for
user: {username}
level: {level}
""".format(**user['data']))
|
393743e391575d6cf4a3bfffb4f53cfa0848c49e | tests/test_donemail.py | tests/test_donemail.py | from mock import Mock
import pytest
import smtplib
from donemail import donemail
BOB = 'bob@example.com'
@pytest.fixture(autouse=True)
def monkeypatch_send_email(monkeypatch):
monkeypatch.setattr('smtplib.SMTP', Mock())
def test_context_manager():
assert_num_emails(0)
with donemail(BOB):
pass
assert_num_emails(1)
def assert_num_emails(expected_num_emails):
assert smtplib.SMTP.return_value.sendmail.call_count == expected_num_emails
def test_decorator():
@donemail(BOB)
def add(x, y):
return x + y
assert_num_emails(0)
add(1, y=2)
assert_num_emails(1)
def test_context_manager_with_exception():
assert_num_emails(0)
with pytest.raises(ZeroDivisionError):
with donemail(BOB):
1 / 0
assert_num_emails(1)
def test_decorator_with_exception():
@donemail(BOB)
def divide(x, y):
return x / y
assert_num_emails(0)
with pytest.raises(ZeroDivisionError):
divide(1, 0)
assert_num_emails(1)
| from mock import ANY, Mock
import pytest
import smtplib
from donemail import donemail
BOB = 'bob@example.com'
@pytest.fixture(autouse=True)
def monkeypatch_send_email(monkeypatch):
mock_smtp_class = Mock()
mock_smtp_class.return_value = Mock()
monkeypatch.setattr('smtplib.SMTP', mock_smtp_class)
def test_context_manager():
assert_num_emails(0)
with donemail(BOB):
pass
assert_sent_email(ANY, [BOB], ANY)
def assert_num_emails(expected_num_emails):
assert get_mock_smtp().sendmail.call_count == expected_num_emails
def assert_sent_email(from_addr, to_addrs, msg):
get_mock_smtp().sendmail.assert_called_once_with(from_addr, to_addrs, msg)
def get_mock_smtp():
return smtplib.SMTP()
def test_decorator():
@donemail(BOB)
def add(x, y):
return x + y
assert_num_emails(0)
add(1, y=2)
assert_sent_email(ANY, [BOB], ANY)
def test_context_manager_with_exception():
assert_num_emails(0)
with pytest.raises(ZeroDivisionError):
with donemail(BOB):
1 / 0
assert_sent_email(ANY, [BOB], ANY)
def test_decorator_with_exception():
@donemail(BOB)
def divide(x, y):
return x / y
assert_num_emails(0)
with pytest.raises(ZeroDivisionError):
divide(1, 0)
assert_sent_email(ANY, [BOB], ANY)
| Check that test emails are sent to BOB | Check that test emails are sent to BOB
| Python | mit | alexandershov/donemail | from mock import Mock
import pytest
import smtplib
from donemail import donemail
BOB = 'bob@example.com'
@pytest.fixture(autouse=True)
def monkeypatch_send_email(monkeypatch):
monkeypatch.setattr('smtplib.SMTP', Mock())
def test_context_manager():
assert_num_emails(0)
with donemail(BOB):
pass
assert_num_emails(1)
def assert_num_emails(expected_num_emails):
assert smtplib.SMTP.return_value.sendmail.call_count == expected_num_emails
def test_decorator():
@donemail(BOB)
def add(x, y):
return x + y
assert_num_emails(0)
add(1, y=2)
assert_num_emails(1)
def test_context_manager_with_exception():
assert_num_emails(0)
with pytest.raises(ZeroDivisionError):
with donemail(BOB):
1 / 0
assert_num_emails(1)
def test_decorator_with_exception():
@donemail(BOB)
def divide(x, y):
return x / y
assert_num_emails(0)
with pytest.raises(ZeroDivisionError):
divide(1, 0)
assert_num_emails(1)
Check that test emails are sent to BOB | from mock import ANY, Mock
import pytest
import smtplib
from donemail import donemail
BOB = 'bob@example.com'
@pytest.fixture(autouse=True)
def monkeypatch_send_email(monkeypatch):
mock_smtp_class = Mock()
mock_smtp_class.return_value = Mock()
monkeypatch.setattr('smtplib.SMTP', mock_smtp_class)
def test_context_manager():
assert_num_emails(0)
with donemail(BOB):
pass
assert_sent_email(ANY, [BOB], ANY)
def assert_num_emails(expected_num_emails):
assert get_mock_smtp().sendmail.call_count == expected_num_emails
def assert_sent_email(from_addr, to_addrs, msg):
get_mock_smtp().sendmail.assert_called_once_with(from_addr, to_addrs, msg)
def get_mock_smtp():
return smtplib.SMTP()
def test_decorator():
@donemail(BOB)
def add(x, y):
return x + y
assert_num_emails(0)
add(1, y=2)
assert_sent_email(ANY, [BOB], ANY)
def test_context_manager_with_exception():
assert_num_emails(0)
with pytest.raises(ZeroDivisionError):
with donemail(BOB):
1 / 0
assert_sent_email(ANY, [BOB], ANY)
def test_decorator_with_exception():
@donemail(BOB)
def divide(x, y):
return x / y
assert_num_emails(0)
with pytest.raises(ZeroDivisionError):
divide(1, 0)
assert_sent_email(ANY, [BOB], ANY)
| <commit_before>from mock import Mock
import pytest
import smtplib
from donemail import donemail
BOB = 'bob@example.com'
@pytest.fixture(autouse=True)
def monkeypatch_send_email(monkeypatch):
monkeypatch.setattr('smtplib.SMTP', Mock())
def test_context_manager():
assert_num_emails(0)
with donemail(BOB):
pass
assert_num_emails(1)
def assert_num_emails(expected_num_emails):
assert smtplib.SMTP.return_value.sendmail.call_count == expected_num_emails
def test_decorator():
@donemail(BOB)
def add(x, y):
return x + y
assert_num_emails(0)
add(1, y=2)
assert_num_emails(1)
def test_context_manager_with_exception():
assert_num_emails(0)
with pytest.raises(ZeroDivisionError):
with donemail(BOB):
1 / 0
assert_num_emails(1)
def test_decorator_with_exception():
@donemail(BOB)
def divide(x, y):
return x / y
assert_num_emails(0)
with pytest.raises(ZeroDivisionError):
divide(1, 0)
assert_num_emails(1)
<commit_msg>Check that test emails are sent to BOB<commit_after> | from mock import ANY, Mock
import pytest
import smtplib
from donemail import donemail
BOB = 'bob@example.com'
@pytest.fixture(autouse=True)
def monkeypatch_send_email(monkeypatch):
mock_smtp_class = Mock()
mock_smtp_class.return_value = Mock()
monkeypatch.setattr('smtplib.SMTP', mock_smtp_class)
def test_context_manager():
assert_num_emails(0)
with donemail(BOB):
pass
assert_sent_email(ANY, [BOB], ANY)
def assert_num_emails(expected_num_emails):
assert get_mock_smtp().sendmail.call_count == expected_num_emails
def assert_sent_email(from_addr, to_addrs, msg):
get_mock_smtp().sendmail.assert_called_once_with(from_addr, to_addrs, msg)
def get_mock_smtp():
return smtplib.SMTP()
def test_decorator():
@donemail(BOB)
def add(x, y):
return x + y
assert_num_emails(0)
add(1, y=2)
assert_sent_email(ANY, [BOB], ANY)
def test_context_manager_with_exception():
assert_num_emails(0)
with pytest.raises(ZeroDivisionError):
with donemail(BOB):
1 / 0
assert_sent_email(ANY, [BOB], ANY)
def test_decorator_with_exception():
@donemail(BOB)
def divide(x, y):
return x / y
assert_num_emails(0)
with pytest.raises(ZeroDivisionError):
divide(1, 0)
assert_sent_email(ANY, [BOB], ANY)
| from mock import Mock
import pytest
import smtplib
from donemail import donemail
BOB = 'bob@example.com'
@pytest.fixture(autouse=True)
def monkeypatch_send_email(monkeypatch):
monkeypatch.setattr('smtplib.SMTP', Mock())
def test_context_manager():
assert_num_emails(0)
with donemail(BOB):
pass
assert_num_emails(1)
def assert_num_emails(expected_num_emails):
assert smtplib.SMTP.return_value.sendmail.call_count == expected_num_emails
def test_decorator():
@donemail(BOB)
def add(x, y):
return x + y
assert_num_emails(0)
add(1, y=2)
assert_num_emails(1)
def test_context_manager_with_exception():
assert_num_emails(0)
with pytest.raises(ZeroDivisionError):
with donemail(BOB):
1 / 0
assert_num_emails(1)
def test_decorator_with_exception():
@donemail(BOB)
def divide(x, y):
return x / y
assert_num_emails(0)
with pytest.raises(ZeroDivisionError):
divide(1, 0)
assert_num_emails(1)
Check that test emails are sent to BOBfrom mock import ANY, Mock
import pytest
import smtplib
from donemail import donemail
BOB = 'bob@example.com'
@pytest.fixture(autouse=True)
def monkeypatch_send_email(monkeypatch):
mock_smtp_class = Mock()
mock_smtp_class.return_value = Mock()
monkeypatch.setattr('smtplib.SMTP', mock_smtp_class)
def test_context_manager():
assert_num_emails(0)
with donemail(BOB):
pass
assert_sent_email(ANY, [BOB], ANY)
def assert_num_emails(expected_num_emails):
assert get_mock_smtp().sendmail.call_count == expected_num_emails
def assert_sent_email(from_addr, to_addrs, msg):
get_mock_smtp().sendmail.assert_called_once_with(from_addr, to_addrs, msg)
def get_mock_smtp():
return smtplib.SMTP()
def test_decorator():
@donemail(BOB)
def add(x, y):
return x + y
assert_num_emails(0)
add(1, y=2)
assert_sent_email(ANY, [BOB], ANY)
def test_context_manager_with_exception():
assert_num_emails(0)
with pytest.raises(ZeroDivisionError):
with donemail(BOB):
1 / 0
assert_sent_email(ANY, [BOB], ANY)
def test_decorator_with_exception():
@donemail(BOB)
def divide(x, y):
return x / y
assert_num_emails(0)
with pytest.raises(ZeroDivisionError):
divide(1, 0)
assert_sent_email(ANY, [BOB], ANY)
| <commit_before>from mock import Mock
import pytest
import smtplib
from donemail import donemail
BOB = 'bob@example.com'
@pytest.fixture(autouse=True)
def monkeypatch_send_email(monkeypatch):
monkeypatch.setattr('smtplib.SMTP', Mock())
def test_context_manager():
assert_num_emails(0)
with donemail(BOB):
pass
assert_num_emails(1)
def assert_num_emails(expected_num_emails):
assert smtplib.SMTP.return_value.sendmail.call_count == expected_num_emails
def test_decorator():
@donemail(BOB)
def add(x, y):
return x + y
assert_num_emails(0)
add(1, y=2)
assert_num_emails(1)
def test_context_manager_with_exception():
assert_num_emails(0)
with pytest.raises(ZeroDivisionError):
with donemail(BOB):
1 / 0
assert_num_emails(1)
def test_decorator_with_exception():
@donemail(BOB)
def divide(x, y):
return x / y
assert_num_emails(0)
with pytest.raises(ZeroDivisionError):
divide(1, 0)
assert_num_emails(1)
<commit_msg>Check that test emails are sent to BOB<commit_after>from mock import ANY, Mock
import pytest
import smtplib
from donemail import donemail
BOB = 'bob@example.com'
@pytest.fixture(autouse=True)
def monkeypatch_send_email(monkeypatch):
mock_smtp_class = Mock()
mock_smtp_class.return_value = Mock()
monkeypatch.setattr('smtplib.SMTP', mock_smtp_class)
def test_context_manager():
assert_num_emails(0)
with donemail(BOB):
pass
assert_sent_email(ANY, [BOB], ANY)
def assert_num_emails(expected_num_emails):
assert get_mock_smtp().sendmail.call_count == expected_num_emails
def assert_sent_email(from_addr, to_addrs, msg):
get_mock_smtp().sendmail.assert_called_once_with(from_addr, to_addrs, msg)
def get_mock_smtp():
return smtplib.SMTP()
def test_decorator():
@donemail(BOB)
def add(x, y):
return x + y
assert_num_emails(0)
add(1, y=2)
assert_sent_email(ANY, [BOB], ANY)
def test_context_manager_with_exception():
assert_num_emails(0)
with pytest.raises(ZeroDivisionError):
with donemail(BOB):
1 / 0
assert_sent_email(ANY, [BOB], ANY)
def test_decorator_with_exception():
@donemail(BOB)
def divide(x, y):
return x / y
assert_num_emails(0)
with pytest.raises(ZeroDivisionError):
divide(1, 0)
assert_sent_email(ANY, [BOB], ANY)
|
634ae735db61ebb211b9e3159ca4dac7861e5553 | cluster/update_jobs.py | cluster/update_jobs.py | from django.contrib.auth.models import User
from models import Job
from interface import get_all_jobs
def run_all():
for user in User.objects.all():
creds = user.credentials.all()
for i, cluster in enumerate(get_all_jobs(user)):
cred = creds[i]
jobs = {}
for job in cluster["jobs"]:
status = job[-1]
job_id = job[0]
if status in jobs:
jobs[status].append(job_id)
else:
jobs[status] = [job_id]
Job.update_states(cred, jobs)
if __name__ == "__main__":
run_all()
| from django.contrib.auth.models import User
from models import Job
from interface import get_all_jobs
def run_all():
for user in User.objects.all():
creds = user.credentials.all()
for i, cluster in enumerate(get_all_jobs(user)):
cred = creds[i]
jobs = {}
jobids = []
for job in cluster["jobs"]:
status = job[-1]
job_id = job[0]
jobids.append(job_id)
if status in jobs:
jobs[status].append(job_id)
else:
jobs[status] = [job_id]
running = Job.get_running_jobs(credential=cred)
unknown = running.exclude(jobid__in=set(jobids)).values_list('jobid', flat=True)
if unknown:
jobs[Job.UNKNOWN] = list(unknown)
Job.update_states(cred, jobs)
if __name__ == "__main__":
run_all()
| Add updating of jobs if their state is now unknown | Add updating of jobs if their state is now unknown
| Python | mit | crcollins/chemtools-webapp,crcollins/chemtools-webapp,crcollins/chemtools-webapp,crcollins/chemtools-webapp,crcollins/chemtools-webapp | from django.contrib.auth.models import User
from models import Job
from interface import get_all_jobs
def run_all():
for user in User.objects.all():
creds = user.credentials.all()
for i, cluster in enumerate(get_all_jobs(user)):
cred = creds[i]
jobs = {}
for job in cluster["jobs"]:
status = job[-1]
job_id = job[0]
if status in jobs:
jobs[status].append(job_id)
else:
jobs[status] = [job_id]
Job.update_states(cred, jobs)
if __name__ == "__main__":
run_all()
Add updating of jobs if their state is now unknown | from django.contrib.auth.models import User
from models import Job
from interface import get_all_jobs
def run_all():
for user in User.objects.all():
creds = user.credentials.all()
for i, cluster in enumerate(get_all_jobs(user)):
cred = creds[i]
jobs = {}
jobids = []
for job in cluster["jobs"]:
status = job[-1]
job_id = job[0]
jobids.append(job_id)
if status in jobs:
jobs[status].append(job_id)
else:
jobs[status] = [job_id]
running = Job.get_running_jobs(credential=cred)
unknown = running.exclude(jobid__in=set(jobids)).values_list('jobid', flat=True)
if unknown:
jobs[Job.UNKNOWN] = list(unknown)
Job.update_states(cred, jobs)
if __name__ == "__main__":
run_all()
| <commit_before>from django.contrib.auth.models import User
from models import Job
from interface import get_all_jobs
def run_all():
for user in User.objects.all():
creds = user.credentials.all()
for i, cluster in enumerate(get_all_jobs(user)):
cred = creds[i]
jobs = {}
for job in cluster["jobs"]:
status = job[-1]
job_id = job[0]
if status in jobs:
jobs[status].append(job_id)
else:
jobs[status] = [job_id]
Job.update_states(cred, jobs)
if __name__ == "__main__":
run_all()
<commit_msg>Add updating of jobs if their state is now unknown<commit_after> | from django.contrib.auth.models import User
from models import Job
from interface import get_all_jobs
def run_all():
for user in User.objects.all():
creds = user.credentials.all()
for i, cluster in enumerate(get_all_jobs(user)):
cred = creds[i]
jobs = {}
jobids = []
for job in cluster["jobs"]:
status = job[-1]
job_id = job[0]
jobids.append(job_id)
if status in jobs:
jobs[status].append(job_id)
else:
jobs[status] = [job_id]
running = Job.get_running_jobs(credential=cred)
unknown = running.exclude(jobid__in=set(jobids)).values_list('jobid', flat=True)
if unknown:
jobs[Job.UNKNOWN] = list(unknown)
Job.update_states(cred, jobs)
if __name__ == "__main__":
run_all()
| from django.contrib.auth.models import User
from models import Job
from interface import get_all_jobs
def run_all():
for user in User.objects.all():
creds = user.credentials.all()
for i, cluster in enumerate(get_all_jobs(user)):
cred = creds[i]
jobs = {}
for job in cluster["jobs"]:
status = job[-1]
job_id = job[0]
if status in jobs:
jobs[status].append(job_id)
else:
jobs[status] = [job_id]
Job.update_states(cred, jobs)
if __name__ == "__main__":
run_all()
Add updating of jobs if their state is now unknownfrom django.contrib.auth.models import User
from models import Job
from interface import get_all_jobs
def run_all():
for user in User.objects.all():
creds = user.credentials.all()
for i, cluster in enumerate(get_all_jobs(user)):
cred = creds[i]
jobs = {}
jobids = []
for job in cluster["jobs"]:
status = job[-1]
job_id = job[0]
jobids.append(job_id)
if status in jobs:
jobs[status].append(job_id)
else:
jobs[status] = [job_id]
running = Job.get_running_jobs(credential=cred)
unknown = running.exclude(jobid__in=set(jobids)).values_list('jobid', flat=True)
if unknown:
jobs[Job.UNKNOWN] = list(unknown)
Job.update_states(cred, jobs)
if __name__ == "__main__":
run_all()
| <commit_before>from django.contrib.auth.models import User
from models import Job
from interface import get_all_jobs
def run_all():
for user in User.objects.all():
creds = user.credentials.all()
for i, cluster in enumerate(get_all_jobs(user)):
cred = creds[i]
jobs = {}
for job in cluster["jobs"]:
status = job[-1]
job_id = job[0]
if status in jobs:
jobs[status].append(job_id)
else:
jobs[status] = [job_id]
Job.update_states(cred, jobs)
if __name__ == "__main__":
run_all()
<commit_msg>Add updating of jobs if their state is now unknown<commit_after>from django.contrib.auth.models import User
from models import Job
from interface import get_all_jobs
def run_all():
for user in User.objects.all():
creds = user.credentials.all()
for i, cluster in enumerate(get_all_jobs(user)):
cred = creds[i]
jobs = {}
jobids = []
for job in cluster["jobs"]:
status = job[-1]
job_id = job[0]
jobids.append(job_id)
if status in jobs:
jobs[status].append(job_id)
else:
jobs[status] = [job_id]
running = Job.get_running_jobs(credential=cred)
unknown = running.exclude(jobid__in=set(jobids)).values_list('jobid', flat=True)
if unknown:
jobs[Job.UNKNOWN] = list(unknown)
Job.update_states(cred, jobs)
if __name__ == "__main__":
run_all()
|
27e2c86641bac1b2083a36d0eaf84e79553c39ce | pycom/objects.py | pycom/objects.py | # encoding: utf-8
### Attribute Wrapper
class AttrWrapper(object):
attrs = []
def __setattr__(self, name, value):
if name not in self.attrs:
raise AttributeError("'%s' is not supported" % name)
object.__setattr__(self, name, value)
def __repr__(self):
attrs = []
template = "%s=%s"
for name in self.attrs:
try:
attrs.append(template % (name, getattr(self, name)))
except AttributeError:
pass
return "%s(%s)" % (self.__class__.__name__, ", ".join(attrs))
def val(obj, name, default=None):
if hasattr(obj, name):
return obj.name
elif name in obj:
return obj[name]
elif isinstance(obj, (list, tuple)) and isinstance(name, int):
try:
return obj[name]
except Exception:
return default
else:
return default
v = val
| # encoding: utf-8
import six
### Attribute Wrapper
class AttrWrapper(object):
attrs = []
def __setattr__(self, name, value):
if name not in self.attrs:
raise AttributeError("'%s' is not supported" % name)
object.__setattr__(self, name, value)
def __repr__(self):
attrs = []
template = "%s=%s"
for name in self.attrs:
try:
attrs.append(template % (name, getattr(self, name)))
except AttributeError:
pass
return "%s(%s)" % (self.__class__.__name__, ", ".join(attrs))
def val(obj, name, default=None):
if isinstance(name, six.string_types) and hasattr(obj, name):
return getattr(obj, name)
try:
return obj[name]
except Exception:
return default
v = val
| Fix a bug about the function, 'val'. | Fix a bug about the function, 'val'.
| Python | mit | xgfone/xutils,xgfone/pycom | # encoding: utf-8
### Attribute Wrapper
class AttrWrapper(object):
attrs = []
def __setattr__(self, name, value):
if name not in self.attrs:
raise AttributeError("'%s' is not supported" % name)
object.__setattr__(self, name, value)
def __repr__(self):
attrs = []
template = "%s=%s"
for name in self.attrs:
try:
attrs.append(template % (name, getattr(self, name)))
except AttributeError:
pass
return "%s(%s)" % (self.__class__.__name__, ", ".join(attrs))
def val(obj, name, default=None):
if hasattr(obj, name):
return obj.name
elif name in obj:
return obj[name]
elif isinstance(obj, (list, tuple)) and isinstance(name, int):
try:
return obj[name]
except Exception:
return default
else:
return default
v = val
Fix a bug about the function, 'val'. | # encoding: utf-8
import six
### Attribute Wrapper
class AttrWrapper(object):
attrs = []
def __setattr__(self, name, value):
if name not in self.attrs:
raise AttributeError("'%s' is not supported" % name)
object.__setattr__(self, name, value)
def __repr__(self):
attrs = []
template = "%s=%s"
for name in self.attrs:
try:
attrs.append(template % (name, getattr(self, name)))
except AttributeError:
pass
return "%s(%s)" % (self.__class__.__name__, ", ".join(attrs))
def val(obj, name, default=None):
if isinstance(name, six.string_types) and hasattr(obj, name):
return getattr(obj, name)
try:
return obj[name]
except Exception:
return default
v = val
| <commit_before># encoding: utf-8
### Attribute Wrapper
class AttrWrapper(object):
attrs = []
def __setattr__(self, name, value):
if name not in self.attrs:
raise AttributeError("'%s' is not supported" % name)
object.__setattr__(self, name, value)
def __repr__(self):
attrs = []
template = "%s=%s"
for name in self.attrs:
try:
attrs.append(template % (name, getattr(self, name)))
except AttributeError:
pass
return "%s(%s)" % (self.__class__.__name__, ", ".join(attrs))
def val(obj, name, default=None):
if hasattr(obj, name):
return obj.name
elif name in obj:
return obj[name]
elif isinstance(obj, (list, tuple)) and isinstance(name, int):
try:
return obj[name]
except Exception:
return default
else:
return default
v = val
<commit_msg>Fix a bug about the function, 'val'.<commit_after> | # encoding: utf-8
import six
### Attribute Wrapper
class AttrWrapper(object):
attrs = []
def __setattr__(self, name, value):
if name not in self.attrs:
raise AttributeError("'%s' is not supported" % name)
object.__setattr__(self, name, value)
def __repr__(self):
attrs = []
template = "%s=%s"
for name in self.attrs:
try:
attrs.append(template % (name, getattr(self, name)))
except AttributeError:
pass
return "%s(%s)" % (self.__class__.__name__, ", ".join(attrs))
def val(obj, name, default=None):
if isinstance(name, six.string_types) and hasattr(obj, name):
return getattr(obj, name)
try:
return obj[name]
except Exception:
return default
v = val
| # encoding: utf-8
### Attribute Wrapper
class AttrWrapper(object):
attrs = []
def __setattr__(self, name, value):
if name not in self.attrs:
raise AttributeError("'%s' is not supported" % name)
object.__setattr__(self, name, value)
def __repr__(self):
attrs = []
template = "%s=%s"
for name in self.attrs:
try:
attrs.append(template % (name, getattr(self, name)))
except AttributeError:
pass
return "%s(%s)" % (self.__class__.__name__, ", ".join(attrs))
def val(obj, name, default=None):
if hasattr(obj, name):
return obj.name
elif name in obj:
return obj[name]
elif isinstance(obj, (list, tuple)) and isinstance(name, int):
try:
return obj[name]
except Exception:
return default
else:
return default
v = val
Fix a bug about the function, 'val'.# encoding: utf-8
import six
### Attribute Wrapper
class AttrWrapper(object):
attrs = []
def __setattr__(self, name, value):
if name not in self.attrs:
raise AttributeError("'%s' is not supported" % name)
object.__setattr__(self, name, value)
def __repr__(self):
attrs = []
template = "%s=%s"
for name in self.attrs:
try:
attrs.append(template % (name, getattr(self, name)))
except AttributeError:
pass
return "%s(%s)" % (self.__class__.__name__, ", ".join(attrs))
def val(obj, name, default=None):
if isinstance(name, six.string_types) and hasattr(obj, name):
return getattr(obj, name)
try:
return obj[name]
except Exception:
return default
v = val
| <commit_before># encoding: utf-8
### Attribute Wrapper
class AttrWrapper(object):
attrs = []
def __setattr__(self, name, value):
if name not in self.attrs:
raise AttributeError("'%s' is not supported" % name)
object.__setattr__(self, name, value)
def __repr__(self):
attrs = []
template = "%s=%s"
for name in self.attrs:
try:
attrs.append(template % (name, getattr(self, name)))
except AttributeError:
pass
return "%s(%s)" % (self.__class__.__name__, ", ".join(attrs))
def val(obj, name, default=None):
if hasattr(obj, name):
return obj.name
elif name in obj:
return obj[name]
elif isinstance(obj, (list, tuple)) and isinstance(name, int):
try:
return obj[name]
except Exception:
return default
else:
return default
v = val
<commit_msg>Fix a bug about the function, 'val'.<commit_after># encoding: utf-8
import six
### Attribute Wrapper
class AttrWrapper(object):
attrs = []
def __setattr__(self, name, value):
if name not in self.attrs:
raise AttributeError("'%s' is not supported" % name)
object.__setattr__(self, name, value)
def __repr__(self):
attrs = []
template = "%s=%s"
for name in self.attrs:
try:
attrs.append(template % (name, getattr(self, name)))
except AttributeError:
pass
return "%s(%s)" % (self.__class__.__name__, ", ".join(attrs))
def val(obj, name, default=None):
if isinstance(name, six.string_types) and hasattr(obj, name):
return getattr(obj, name)
try:
return obj[name]
except Exception:
return default
v = val
|
23ab67f74fc7c09310638529ccf804ec2271fd6c | pynads/writer.py | pynads/writer.py | from .monad import Monad
from .functor import fmap
class Writer(Monad):
"""Stores a value as well as a log of events that have transpired
with the value.
"""
def __init__(self, v, log):
self.v = v
if not isinstance(log, list):
self.log = [log]
else:
self.log = log
@classmethod
def unit(cls, v):
return cls(v, [])
def fmap(self, f):
return Writer(f(self.v), self.log)
def apply(self, applicative):
return fmap(self.v, applicative)
def bind(self, f):
v, msg = f(self.v)
return Writer(v, self.log + [msg])
def __repr__(self):
return "Writer({!r}, {!r})".format(self.v, self.log)
| from .utils import _iter_but_not_str_or_map
from .monad import Monad
from .functor import fmap
class Writer(Monad):
"""Stores a value as well as a log of events that have transpired
with the value.
"""
__slots__ = ('v', 'log')
def __init__(self, v, log):
self.v = v
if _iter_but_not_str_or_map(log):
print("convert iter to list log...")
self.log = [l for l in log]
else:
print("convert str/map/other to list log...")
self.log = [log]
@classmethod
def unit(cls, v):
return cls(v, [])
def fmap(self, f):
return Writer(f(self.v), self.log)
def apply(self, applicative):
return fmap(self.v, applicative)
def bind(self, f):
v, msg = f(self.v)
return Writer(v, self.log + [msg])
def __repr__(self):
return "Writer({!r}, {!r})".format(self.v, self.log)
| Use utils._iter_but_not_str_or_map in Writer log creation. | Use utils._iter_but_not_str_or_map in Writer log creation.
| Python | mit | justanr/pynads | from .monad import Monad
from .functor import fmap
class Writer(Monad):
"""Stores a value as well as a log of events that have transpired
with the value.
"""
def __init__(self, v, log):
self.v = v
if not isinstance(log, list):
self.log = [log]
else:
self.log = log
@classmethod
def unit(cls, v):
return cls(v, [])
def fmap(self, f):
return Writer(f(self.v), self.log)
def apply(self, applicative):
return fmap(self.v, applicative)
def bind(self, f):
v, msg = f(self.v)
return Writer(v, self.log + [msg])
def __repr__(self):
return "Writer({!r}, {!r})".format(self.v, self.log)
Use utils._iter_but_not_str_or_map in Writer log creation. | from .utils import _iter_but_not_str_or_map
from .monad import Monad
from .functor import fmap
class Writer(Monad):
"""Stores a value as well as a log of events that have transpired
with the value.
"""
__slots__ = ('v', 'log')
def __init__(self, v, log):
self.v = v
if _iter_but_not_str_or_map(log):
print("convert iter to list log...")
self.log = [l for l in log]
else:
print("convert str/map/other to list log...")
self.log = [log]
@classmethod
def unit(cls, v):
return cls(v, [])
def fmap(self, f):
return Writer(f(self.v), self.log)
def apply(self, applicative):
return fmap(self.v, applicative)
def bind(self, f):
v, msg = f(self.v)
return Writer(v, self.log + [msg])
def __repr__(self):
return "Writer({!r}, {!r})".format(self.v, self.log)
| <commit_before>from .monad import Monad
from .functor import fmap
class Writer(Monad):
"""Stores a value as well as a log of events that have transpired
with the value.
"""
def __init__(self, v, log):
self.v = v
if not isinstance(log, list):
self.log = [log]
else:
self.log = log
@classmethod
def unit(cls, v):
return cls(v, [])
def fmap(self, f):
return Writer(f(self.v), self.log)
def apply(self, applicative):
return fmap(self.v, applicative)
def bind(self, f):
v, msg = f(self.v)
return Writer(v, self.log + [msg])
def __repr__(self):
return "Writer({!r}, {!r})".format(self.v, self.log)
<commit_msg>Use utils._iter_but_not_str_or_map in Writer log creation.<commit_after> | from .utils import _iter_but_not_str_or_map
from .monad import Monad
from .functor import fmap
class Writer(Monad):
"""Stores a value as well as a log of events that have transpired
with the value.
"""
__slots__ = ('v', 'log')
def __init__(self, v, log):
self.v = v
if _iter_but_not_str_or_map(log):
print("convert iter to list log...")
self.log = [l for l in log]
else:
print("convert str/map/other to list log...")
self.log = [log]
@classmethod
def unit(cls, v):
return cls(v, [])
def fmap(self, f):
return Writer(f(self.v), self.log)
def apply(self, applicative):
return fmap(self.v, applicative)
def bind(self, f):
v, msg = f(self.v)
return Writer(v, self.log + [msg])
def __repr__(self):
return "Writer({!r}, {!r})".format(self.v, self.log)
| from .monad import Monad
from .functor import fmap
class Writer(Monad):
"""Stores a value as well as a log of events that have transpired
with the value.
"""
def __init__(self, v, log):
self.v = v
if not isinstance(log, list):
self.log = [log]
else:
self.log = log
@classmethod
def unit(cls, v):
return cls(v, [])
def fmap(self, f):
return Writer(f(self.v), self.log)
def apply(self, applicative):
return fmap(self.v, applicative)
def bind(self, f):
v, msg = f(self.v)
return Writer(v, self.log + [msg])
def __repr__(self):
return "Writer({!r}, {!r})".format(self.v, self.log)
Use utils._iter_but_not_str_or_map in Writer log creation.from .utils import _iter_but_not_str_or_map
from .monad import Monad
from .functor import fmap
class Writer(Monad):
"""Stores a value as well as a log of events that have transpired
with the value.
"""
__slots__ = ('v', 'log')
def __init__(self, v, log):
self.v = v
if _iter_but_not_str_or_map(log):
print("convert iter to list log...")
self.log = [l for l in log]
else:
print("convert str/map/other to list log...")
self.log = [log]
@classmethod
def unit(cls, v):
return cls(v, [])
def fmap(self, f):
return Writer(f(self.v), self.log)
def apply(self, applicative):
return fmap(self.v, applicative)
def bind(self, f):
v, msg = f(self.v)
return Writer(v, self.log + [msg])
def __repr__(self):
return "Writer({!r}, {!r})".format(self.v, self.log)
| <commit_before>from .monad import Monad
from .functor import fmap
class Writer(Monad):
"""Stores a value as well as a log of events that have transpired
with the value.
"""
def __init__(self, v, log):
self.v = v
if not isinstance(log, list):
self.log = [log]
else:
self.log = log
@classmethod
def unit(cls, v):
return cls(v, [])
def fmap(self, f):
return Writer(f(self.v), self.log)
def apply(self, applicative):
return fmap(self.v, applicative)
def bind(self, f):
v, msg = f(self.v)
return Writer(v, self.log + [msg])
def __repr__(self):
return "Writer({!r}, {!r})".format(self.v, self.log)
<commit_msg>Use utils._iter_but_not_str_or_map in Writer log creation.<commit_after>from .utils import _iter_but_not_str_or_map
from .monad import Monad
from .functor import fmap
class Writer(Monad):
"""Stores a value as well as a log of events that have transpired
with the value.
"""
__slots__ = ('v', 'log')
def __init__(self, v, log):
self.v = v
if _iter_but_not_str_or_map(log):
print("convert iter to list log...")
self.log = [l for l in log]
else:
print("convert str/map/other to list log...")
self.log = [log]
@classmethod
def unit(cls, v):
return cls(v, [])
def fmap(self, f):
return Writer(f(self.v), self.log)
def apply(self, applicative):
return fmap(self.v, applicative)
def bind(self, f):
v, msg = f(self.v)
return Writer(v, self.log + [msg])
def __repr__(self):
return "Writer({!r}, {!r})".format(self.v, self.log)
|
2a06886077c05e1bfae7c28a09cf3489da6e450a | heat/utils.py | heat/utils.py | class LazyPluggable(object):
"""A pluggable backend loaded lazily based on some value."""
def __init__(self, pivot, **backends):
self.__backends = backends
self.__pivot = pivot
self.__backend = None
def __get_backend(self):
if not self.__backend:
print self.__backends.values()
backend_name = 'sqlalchemy'
backend = self.__backends[backend_name]
if isinstance(backend, tuple):
name = backend[0]
fromlist = backend[1]
else:
name = backend
fromlist = backend
self.__backend = __import__(name, None, None, fromlist)
return self.__backend
def __getattr__(self, key):
backend = self.__get_backend()
return getattr(backend, key)
| class LazyPluggable(object):
"""A pluggable backend loaded lazily based on some value."""
def __init__(self, pivot, **backends):
self.__backends = backends
self.__pivot = pivot
self.__backend = None
def __get_backend(self):
if not self.__backend:
backend_name = 'sqlalchemy'
backend = self.__backends[backend_name]
if isinstance(backend, tuple):
name = backend[0]
fromlist = backend[1]
else:
name = backend
fromlist = backend
self.__backend = __import__(name, None, None, fromlist)
return self.__backend
def __getattr__(self, key):
backend = self.__get_backend()
return getattr(backend, key)
| Remove stray print debug message | Remove stray print debug message
Signed-off-by: Steven Dake <8638f3fce5db0278cfbc239bd581dfc00c29ec9d@redhat.com>
| Python | apache-2.0 | rickerc/heat_audit,maestro-hybrid-cloud/heat,dims/heat,JioCloud/heat,cwolferh/heat-scratch,rh-s/heat,citrix-openstack-build/heat,steveb/heat,JioCloud/heat,steveb/heat-cfntools,Triv90/Heat,openstack/heat,dragorosson/heat,ntt-sic/heat,varunarya10/heat,miguelgrinberg/heat,rdo-management/heat,pratikmallya/heat,takeshineshiro/heat,redhat-openstack/heat,openstack/heat,Triv90/Heat,miguelgrinberg/heat,srznew/heat,srznew/heat,gonzolino/heat,gonzolino/heat,noironetworks/heat,ntt-sic/heat,dims/heat,steveb/heat,redhat-openstack/heat,varunarya10/heat,takeshineshiro/heat,pshchelo/heat,NeCTAR-RC/heat,noironetworks/heat,maestro-hybrid-cloud/heat,rickerc/heat_audit,citrix-openstack-build/heat,bbandaru/heat-cfntools,pratikmallya/heat,steveb/heat-cfntools,pshchelo/heat,cwolferh/heat-scratch,rdo-management/heat,openstack/heat-cfntools,dragorosson/heat,Triv90/Heat,NeCTAR-RC/heat,rh-s/heat,jasondunsmore/heat,sdake/heat-jeos,jasondunsmore/heat,cryptickp/heat,cryptickp/heat | class LazyPluggable(object):
"""A pluggable backend loaded lazily based on some value."""
def __init__(self, pivot, **backends):
self.__backends = backends
self.__pivot = pivot
self.__backend = None
def __get_backend(self):
if not self.__backend:
print self.__backends.values()
backend_name = 'sqlalchemy'
backend = self.__backends[backend_name]
if isinstance(backend, tuple):
name = backend[0]
fromlist = backend[1]
else:
name = backend
fromlist = backend
self.__backend = __import__(name, None, None, fromlist)
return self.__backend
def __getattr__(self, key):
backend = self.__get_backend()
return getattr(backend, key)
Remove stray print debug message
Signed-off-by: Steven Dake <8638f3fce5db0278cfbc239bd581dfc00c29ec9d@redhat.com> | class LazyPluggable(object):
"""A pluggable backend loaded lazily based on some value."""
def __init__(self, pivot, **backends):
self.__backends = backends
self.__pivot = pivot
self.__backend = None
def __get_backend(self):
if not self.__backend:
backend_name = 'sqlalchemy'
backend = self.__backends[backend_name]
if isinstance(backend, tuple):
name = backend[0]
fromlist = backend[1]
else:
name = backend
fromlist = backend
self.__backend = __import__(name, None, None, fromlist)
return self.__backend
def __getattr__(self, key):
backend = self.__get_backend()
return getattr(backend, key)
| <commit_before>class LazyPluggable(object):
"""A pluggable backend loaded lazily based on some value."""
def __init__(self, pivot, **backends):
self.__backends = backends
self.__pivot = pivot
self.__backend = None
def __get_backend(self):
if not self.__backend:
print self.__backends.values()
backend_name = 'sqlalchemy'
backend = self.__backends[backend_name]
if isinstance(backend, tuple):
name = backend[0]
fromlist = backend[1]
else:
name = backend
fromlist = backend
self.__backend = __import__(name, None, None, fromlist)
return self.__backend
def __getattr__(self, key):
backend = self.__get_backend()
return getattr(backend, key)
<commit_msg>Remove stray print debug message
Signed-off-by: Steven Dake <8638f3fce5db0278cfbc239bd581dfc00c29ec9d@redhat.com><commit_after> | class LazyPluggable(object):
"""A pluggable backend loaded lazily based on some value."""
def __init__(self, pivot, **backends):
self.__backends = backends
self.__pivot = pivot
self.__backend = None
def __get_backend(self):
if not self.__backend:
backend_name = 'sqlalchemy'
backend = self.__backends[backend_name]
if isinstance(backend, tuple):
name = backend[0]
fromlist = backend[1]
else:
name = backend
fromlist = backend
self.__backend = __import__(name, None, None, fromlist)
return self.__backend
def __getattr__(self, key):
backend = self.__get_backend()
return getattr(backend, key)
| class LazyPluggable(object):
"""A pluggable backend loaded lazily based on some value."""
def __init__(self, pivot, **backends):
self.__backends = backends
self.__pivot = pivot
self.__backend = None
def __get_backend(self):
if not self.__backend:
print self.__backends.values()
backend_name = 'sqlalchemy'
backend = self.__backends[backend_name]
if isinstance(backend, tuple):
name = backend[0]
fromlist = backend[1]
else:
name = backend
fromlist = backend
self.__backend = __import__(name, None, None, fromlist)
return self.__backend
def __getattr__(self, key):
backend = self.__get_backend()
return getattr(backend, key)
Remove stray print debug message
Signed-off-by: Steven Dake <8638f3fce5db0278cfbc239bd581dfc00c29ec9d@redhat.com>class LazyPluggable(object):
"""A pluggable backend loaded lazily based on some value."""
def __init__(self, pivot, **backends):
self.__backends = backends
self.__pivot = pivot
self.__backend = None
def __get_backend(self):
if not self.__backend:
backend_name = 'sqlalchemy'
backend = self.__backends[backend_name]
if isinstance(backend, tuple):
name = backend[0]
fromlist = backend[1]
else:
name = backend
fromlist = backend
self.__backend = __import__(name, None, None, fromlist)
return self.__backend
def __getattr__(self, key):
backend = self.__get_backend()
return getattr(backend, key)
| <commit_before>class LazyPluggable(object):
"""A pluggable backend loaded lazily based on some value."""
def __init__(self, pivot, **backends):
self.__backends = backends
self.__pivot = pivot
self.__backend = None
def __get_backend(self):
if not self.__backend:
print self.__backends.values()
backend_name = 'sqlalchemy'
backend = self.__backends[backend_name]
if isinstance(backend, tuple):
name = backend[0]
fromlist = backend[1]
else:
name = backend
fromlist = backend
self.__backend = __import__(name, None, None, fromlist)
return self.__backend
def __getattr__(self, key):
backend = self.__get_backend()
return getattr(backend, key)
<commit_msg>Remove stray print debug message
Signed-off-by: Steven Dake <8638f3fce5db0278cfbc239bd581dfc00c29ec9d@redhat.com><commit_after>class LazyPluggable(object):
"""A pluggable backend loaded lazily based on some value."""
def __init__(self, pivot, **backends):
self.__backends = backends
self.__pivot = pivot
self.__backend = None
def __get_backend(self):
if not self.__backend:
backend_name = 'sqlalchemy'
backend = self.__backends[backend_name]
if isinstance(backend, tuple):
name = backend[0]
fromlist = backend[1]
else:
name = backend
fromlist = backend
self.__backend = __import__(name, None, None, fromlist)
return self.__backend
def __getattr__(self, key):
backend = self.__get_backend()
return getattr(backend, key)
|
1b50b0e6d73e475172d6870ec34c308a9480586a | run_tests.py | run_tests.py | import sys
import os
import subprocess
def main():
executableName = 'CuraEngine'
if len(sys.argv) > 1:
executableName = sys.argv[1]
exitValue = 0
for subPath in os.listdir('testcase_models'):
print 'Running test on %s' % (subPath)
ret = subprocess.call([executableName, os.path.join('testcase_models', subPath))
if ret != 0:
exitValue = 1
sys.exit(exitValue)
if __name__ == '__main__':
main()
| import sys
import os
import subprocess
def main():
executableName = 'CuraEngine'
if len(sys.argv) > 1:
executableName = sys.argv[1]
exitValue = 0
for subPath in os.listdir('testcase_models'):
print 'Running test on %s' % (subPath)
ret = subprocess.call([executableName, os.path.join('testcase_models', subPath)])
if ret != 0:
exitValue = 1
sys.exit(exitValue)
if __name__ == '__main__':
main()
| Add auto run test cases, just add STL files. | Add auto run test cases, just add STL files.
| Python | agpl-3.0 | alex1818/CuraEngine,Jwis921/PersonalCuraEngine,alephobjects/CuraEngine,ROBO3D/CuraEngine,markwal/CuraEngine,Jwis921/PersonalCuraEngine,derekhe/CuraEngine,patrick3coffee/CuraTinyG,foosel/CuraEngine,Skeen/CuraJS-Engine,fxtentacle/CuraEngine,electrocbd/CuraEngine,alephobjects/CuraEngine,electrocbd/CuraEngine,uus169/CuraEngine,totalretribution/CuraEngine,Skeen/CuraJS-Engine,jacobdai/CuraEngine-1,Ultimaker/CuraEngine,Jwis921/PersonalCuraEngine,alephobjects/CuraEngine,pratikshashroff/pcura,Ultimaker/CuraEngine,patrick3coffee/CuraTinyG,robotustra/curax,markwal/CuraEngine,daid/CuraCutEngine,mspark93/CuraEngine,daid/CuraCutEngine,patrick3coffee/CuraTinyG,robotustra/curax,foosel/CuraEngine,be3d/CuraEngine,totalretribution/CuraEngine,electrocbd/CuraEngine,jacobdai/CuraEngine-1,be3d/CuraEngine,uus169/CuraEngine,Intrinsically-Sublime/CuraEngine,fxtentacle/CuraEngine,foosel/CuraEngine,pratikshashroff/pcura,Intrinsically-Sublime/CuraEngine,derekhe/CuraEngine,uus169/CuraEngine,mspark93/CuraEngine,Skeen/CuraJS-Engine,phonyphonecall/CuraEngine,robotustra/curax,alex1818/CuraEngine,be3d/CuraEngine,pratikshashroff/pcura,ROBO3D/CuraEngine,fxtentacle/CuraEngine,phonyphonecall/CuraEngine,ROBO3D/CuraEngine,alex1818/CuraEngine,jacobdai/CuraEngine-1,markwal/CuraEngine,mspark93/CuraEngine,derekhe/CuraEngine,Intrinsically-Sublime/CuraEngine,totalretribution/CuraEngine,phonyphonecall/CuraEngine | import sys
import os
import subprocess
def main():
executableName = 'CuraEngine'
if len(sys.argv) > 1:
executableName = sys.argv[1]
exitValue = 0
for subPath in os.listdir('testcase_models'):
print 'Running test on %s' % (subPath)
ret = subprocess.call([executableName, os.path.join('testcase_models', subPath))
if ret != 0:
exitValue = 1
sys.exit(exitValue)
if __name__ == '__main__':
main()
Add auto run test cases, just add STL files. | import sys
import os
import subprocess
def main():
executableName = 'CuraEngine'
if len(sys.argv) > 1:
executableName = sys.argv[1]
exitValue = 0
for subPath in os.listdir('testcase_models'):
print 'Running test on %s' % (subPath)
ret = subprocess.call([executableName, os.path.join('testcase_models', subPath)])
if ret != 0:
exitValue = 1
sys.exit(exitValue)
if __name__ == '__main__':
main()
| <commit_before>import sys
import os
import subprocess
def main():
executableName = 'CuraEngine'
if len(sys.argv) > 1:
executableName = sys.argv[1]
exitValue = 0
for subPath in os.listdir('testcase_models'):
print 'Running test on %s' % (subPath)
ret = subprocess.call([executableName, os.path.join('testcase_models', subPath))
if ret != 0:
exitValue = 1
sys.exit(exitValue)
if __name__ == '__main__':
main()
<commit_msg>Add auto run test cases, just add STL files.<commit_after> | import sys
import os
import subprocess
def main():
executableName = 'CuraEngine'
if len(sys.argv) > 1:
executableName = sys.argv[1]
exitValue = 0
for subPath in os.listdir('testcase_models'):
print 'Running test on %s' % (subPath)
ret = subprocess.call([executableName, os.path.join('testcase_models', subPath)])
if ret != 0:
exitValue = 1
sys.exit(exitValue)
if __name__ == '__main__':
main()
| import sys
import os
import subprocess
def main():
executableName = 'CuraEngine'
if len(sys.argv) > 1:
executableName = sys.argv[1]
exitValue = 0
for subPath in os.listdir('testcase_models'):
print 'Running test on %s' % (subPath)
ret = subprocess.call([executableName, os.path.join('testcase_models', subPath))
if ret != 0:
exitValue = 1
sys.exit(exitValue)
if __name__ == '__main__':
main()
Add auto run test cases, just add STL files.import sys
import os
import subprocess
def main():
executableName = 'CuraEngine'
if len(sys.argv) > 1:
executableName = sys.argv[1]
exitValue = 0
for subPath in os.listdir('testcase_models'):
print 'Running test on %s' % (subPath)
ret = subprocess.call([executableName, os.path.join('testcase_models', subPath)])
if ret != 0:
exitValue = 1
sys.exit(exitValue)
if __name__ == '__main__':
main()
| <commit_before>import sys
import os
import subprocess
def main():
executableName = 'CuraEngine'
if len(sys.argv) > 1:
executableName = sys.argv[1]
exitValue = 0
for subPath in os.listdir('testcase_models'):
print 'Running test on %s' % (subPath)
ret = subprocess.call([executableName, os.path.join('testcase_models', subPath))
if ret != 0:
exitValue = 1
sys.exit(exitValue)
if __name__ == '__main__':
main()
<commit_msg>Add auto run test cases, just add STL files.<commit_after>import sys
import os
import subprocess
def main():
executableName = 'CuraEngine'
if len(sys.argv) > 1:
executableName = sys.argv[1]
exitValue = 0
for subPath in os.listdir('testcase_models'):
print 'Running test on %s' % (subPath)
ret = subprocess.call([executableName, os.path.join('testcase_models', subPath)])
if ret != 0:
exitValue = 1
sys.exit(exitValue)
if __name__ == '__main__':
main()
|
faf6f74348ed09f2ba0ebb5a133acc1a04edb737 | main.py | main.py | import cmd
import argparse
from Interface import *
class Lexeme(cmd.Cmd):
intro = "Welcome to Lexeme. Input ? for help and commands"
prompt = "(lexeme) "
def do_list(self, arg):
'List word database.'
listwords()
def do_quit(self, arg):
quit()
def do_add(self, arg):
add()
def do_decline(self, arg):
decline()
def do_statistics(self, arg):
statistics()
def do_search(self, arg):
search()
def do_generate(self, arg):
generate()
def do_export(self, arg):
export()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--database", help="set database file")
parser.add_argument("--config", help="set configuration file")
args = parser.parse_args()
if args.database is not None:
Library.loadDatabase(args.database)
else:
Library.loadDatabase()
if args.config is not None:
loadData(args.config)
else:
loadData()
Lexeme().cmdloop()
| import cmd
import argparse
from Interface import *
class Lexeme(cmd.Cmd):
intro = "Welcome to Lexeme! Input '?' for help and commands."
prompt = "Enter command: "
def do_list(self, arg):
'List word database.'
listwords()
def do_quit(self, arg):
quit()
def do_add(self, arg):
add()
def do_decline(self, arg):
decline()
def do_statistics(self, arg):
statistics()
def do_search(self, arg):
search()
def do_generate(self, arg):
generate()
def do_export(self, arg):
export()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--database", help="set database file")
parser.add_argument("--config", help="set configuration file")
args = parser.parse_args()
if args.database is not None:
Library.loadDatabase(args.database)
else:
Library.loadDatabase()
if args.config is not None:
loadData(args.config)
else:
loadData()
Lexeme().cmdloop()
| Change prompt and intro message | Change prompt and intro message
| Python | mit | kdelwat/Lexeme | import cmd
import argparse
from Interface import *
class Lexeme(cmd.Cmd):
intro = "Welcome to Lexeme. Input ? for help and commands"
prompt = "(lexeme) "
def do_list(self, arg):
'List word database.'
listwords()
def do_quit(self, arg):
quit()
def do_add(self, arg):
add()
def do_decline(self, arg):
decline()
def do_statistics(self, arg):
statistics()
def do_search(self, arg):
search()
def do_generate(self, arg):
generate()
def do_export(self, arg):
export()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--database", help="set database file")
parser.add_argument("--config", help="set configuration file")
args = parser.parse_args()
if args.database is not None:
Library.loadDatabase(args.database)
else:
Library.loadDatabase()
if args.config is not None:
loadData(args.config)
else:
loadData()
Lexeme().cmdloop()
Change prompt and intro message | import cmd
import argparse
from Interface import *
class Lexeme(cmd.Cmd):
intro = "Welcome to Lexeme! Input '?' for help and commands."
prompt = "Enter command: "
def do_list(self, arg):
'List word database.'
listwords()
def do_quit(self, arg):
quit()
def do_add(self, arg):
add()
def do_decline(self, arg):
decline()
def do_statistics(self, arg):
statistics()
def do_search(self, arg):
search()
def do_generate(self, arg):
generate()
def do_export(self, arg):
export()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--database", help="set database file")
parser.add_argument("--config", help="set configuration file")
args = parser.parse_args()
if args.database is not None:
Library.loadDatabase(args.database)
else:
Library.loadDatabase()
if args.config is not None:
loadData(args.config)
else:
loadData()
Lexeme().cmdloop()
| <commit_before>import cmd
import argparse
from Interface import *
class Lexeme(cmd.Cmd):
intro = "Welcome to Lexeme. Input ? for help and commands"
prompt = "(lexeme) "
def do_list(self, arg):
'List word database.'
listwords()
def do_quit(self, arg):
quit()
def do_add(self, arg):
add()
def do_decline(self, arg):
decline()
def do_statistics(self, arg):
statistics()
def do_search(self, arg):
search()
def do_generate(self, arg):
generate()
def do_export(self, arg):
export()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--database", help="set database file")
parser.add_argument("--config", help="set configuration file")
args = parser.parse_args()
if args.database is not None:
Library.loadDatabase(args.database)
else:
Library.loadDatabase()
if args.config is not None:
loadData(args.config)
else:
loadData()
Lexeme().cmdloop()
<commit_msg>Change prompt and intro message<commit_after> | import cmd
import argparse
from Interface import *
class Lexeme(cmd.Cmd):
intro = "Welcome to Lexeme! Input '?' for help and commands."
prompt = "Enter command: "
def do_list(self, arg):
'List word database.'
listwords()
def do_quit(self, arg):
quit()
def do_add(self, arg):
add()
def do_decline(self, arg):
decline()
def do_statistics(self, arg):
statistics()
def do_search(self, arg):
search()
def do_generate(self, arg):
generate()
def do_export(self, arg):
export()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--database", help="set database file")
parser.add_argument("--config", help="set configuration file")
args = parser.parse_args()
if args.database is not None:
Library.loadDatabase(args.database)
else:
Library.loadDatabase()
if args.config is not None:
loadData(args.config)
else:
loadData()
Lexeme().cmdloop()
| import cmd
import argparse
from Interface import *
class Lexeme(cmd.Cmd):
intro = "Welcome to Lexeme. Input ? for help and commands"
prompt = "(lexeme) "
def do_list(self, arg):
'List word database.'
listwords()
def do_quit(self, arg):
quit()
def do_add(self, arg):
add()
def do_decline(self, arg):
decline()
def do_statistics(self, arg):
statistics()
def do_search(self, arg):
search()
def do_generate(self, arg):
generate()
def do_export(self, arg):
export()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--database", help="set database file")
parser.add_argument("--config", help="set configuration file")
args = parser.parse_args()
if args.database is not None:
Library.loadDatabase(args.database)
else:
Library.loadDatabase()
if args.config is not None:
loadData(args.config)
else:
loadData()
Lexeme().cmdloop()
Change prompt and intro messageimport cmd
import argparse
from Interface import *
class Lexeme(cmd.Cmd):
intro = "Welcome to Lexeme! Input '?' for help and commands."
prompt = "Enter command: "
def do_list(self, arg):
'List word database.'
listwords()
def do_quit(self, arg):
quit()
def do_add(self, arg):
add()
def do_decline(self, arg):
decline()
def do_statistics(self, arg):
statistics()
def do_search(self, arg):
search()
def do_generate(self, arg):
generate()
def do_export(self, arg):
export()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--database", help="set database file")
parser.add_argument("--config", help="set configuration file")
args = parser.parse_args()
if args.database is not None:
Library.loadDatabase(args.database)
else:
Library.loadDatabase()
if args.config is not None:
loadData(args.config)
else:
loadData()
Lexeme().cmdloop()
| <commit_before>import cmd
import argparse
from Interface import *
class Lexeme(cmd.Cmd):
intro = "Welcome to Lexeme. Input ? for help and commands"
prompt = "(lexeme) "
def do_list(self, arg):
'List word database.'
listwords()
def do_quit(self, arg):
quit()
def do_add(self, arg):
add()
def do_decline(self, arg):
decline()
def do_statistics(self, arg):
statistics()
def do_search(self, arg):
search()
def do_generate(self, arg):
generate()
def do_export(self, arg):
export()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--database", help="set database file")
parser.add_argument("--config", help="set configuration file")
args = parser.parse_args()
if args.database is not None:
Library.loadDatabase(args.database)
else:
Library.loadDatabase()
if args.config is not None:
loadData(args.config)
else:
loadData()
Lexeme().cmdloop()
<commit_msg>Change prompt and intro message<commit_after>import cmd
import argparse
from Interface import *
class Lexeme(cmd.Cmd):
intro = "Welcome to Lexeme! Input '?' for help and commands."
prompt = "Enter command: "
def do_list(self, arg):
'List word database.'
listwords()
def do_quit(self, arg):
quit()
def do_add(self, arg):
add()
def do_decline(self, arg):
decline()
def do_statistics(self, arg):
statistics()
def do_search(self, arg):
search()
def do_generate(self, arg):
generate()
def do_export(self, arg):
export()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--database", help="set database file")
parser.add_argument("--config", help="set configuration file")
args = parser.parse_args()
if args.database is not None:
Library.loadDatabase(args.database)
else:
Library.loadDatabase()
if args.config is not None:
loadData(args.config)
else:
loadData()
Lexeme().cmdloop()
|
aa4f8943616adce4dba826924b8d21e1e8164299 | eduid_signup_amp/__init__.py | eduid_signup_amp/__init__.py | from eduid_am.exceptions import UserDoesNotExist
def attribute_fetcher(db, user_id):
attributes = {}
user = db.registered.find_one({'_id': user_id})
if user is None:
raise UserDoesNotExist("No user matching _id='%s'" % user_id)
else:
# white list of valid attributes for security reasons
for attr in ('email', 'date', 'verified'):
value = user.get(attr, None)
if value is not None:
attributes[attr] = value
# This values must overwrite existent values
for attr in ('screen_name', 'last_name', 'first_name', 'passwords'):
value = user.get(attr, None)
if value is not None:
attributes[attr] = value
return attributes
| from eduid_am.exceptions import UserDoesNotExist
def attribute_fetcher(db, user_id):
attributes = {}
user = db.registered.find_one({'_id': user_id})
if user is None:
raise UserDoesNotExist("No user matching _id='%s'" % user_id)
else:
email = user.get('email', None)
if email:
attributes['mail'] = email
attributes['mailAliases'] = [{
'email': email,
'verified': user.get('verified', False),
}]
# This values must overwrite existent values
for attr in ('givenName', 'sn', 'displayName', 'passwords',
'date'):
value = user.get(attr, None)
if value is not None:
attributes[attr] = value
return attributes
| Change fields to the schema agreed | Change fields to the schema agreed
| Python | bsd-3-clause | SUNET/eduid-signup-amp | from eduid_am.exceptions import UserDoesNotExist
def attribute_fetcher(db, user_id):
attributes = {}
user = db.registered.find_one({'_id': user_id})
if user is None:
raise UserDoesNotExist("No user matching _id='%s'" % user_id)
else:
# white list of valid attributes for security reasons
for attr in ('email', 'date', 'verified'):
value = user.get(attr, None)
if value is not None:
attributes[attr] = value
# This values must overwrite existent values
for attr in ('screen_name', 'last_name', 'first_name', 'passwords'):
value = user.get(attr, None)
if value is not None:
attributes[attr] = value
return attributes
Change fields to the schema agreed | from eduid_am.exceptions import UserDoesNotExist
def attribute_fetcher(db, user_id):
attributes = {}
user = db.registered.find_one({'_id': user_id})
if user is None:
raise UserDoesNotExist("No user matching _id='%s'" % user_id)
else:
email = user.get('email', None)
if email:
attributes['mail'] = email
attributes['mailAliases'] = [{
'email': email,
'verified': user.get('verified', False),
}]
# This values must overwrite existent values
for attr in ('givenName', 'sn', 'displayName', 'passwords',
'date'):
value = user.get(attr, None)
if value is not None:
attributes[attr] = value
return attributes
| <commit_before>from eduid_am.exceptions import UserDoesNotExist
def attribute_fetcher(db, user_id):
attributes = {}
user = db.registered.find_one({'_id': user_id})
if user is None:
raise UserDoesNotExist("No user matching _id='%s'" % user_id)
else:
# white list of valid attributes for security reasons
for attr in ('email', 'date', 'verified'):
value = user.get(attr, None)
if value is not None:
attributes[attr] = value
# This values must overwrite existent values
for attr in ('screen_name', 'last_name', 'first_name', 'passwords'):
value = user.get(attr, None)
if value is not None:
attributes[attr] = value
return attributes
<commit_msg>Change fields to the schema agreed<commit_after> | from eduid_am.exceptions import UserDoesNotExist
def attribute_fetcher(db, user_id):
attributes = {}
user = db.registered.find_one({'_id': user_id})
if user is None:
raise UserDoesNotExist("No user matching _id='%s'" % user_id)
else:
email = user.get('email', None)
if email:
attributes['mail'] = email
attributes['mailAliases'] = [{
'email': email,
'verified': user.get('verified', False),
}]
# This values must overwrite existent values
for attr in ('givenName', 'sn', 'displayName', 'passwords',
'date'):
value = user.get(attr, None)
if value is not None:
attributes[attr] = value
return attributes
| from eduid_am.exceptions import UserDoesNotExist
def attribute_fetcher(db, user_id):
attributes = {}
user = db.registered.find_one({'_id': user_id})
if user is None:
raise UserDoesNotExist("No user matching _id='%s'" % user_id)
else:
# white list of valid attributes for security reasons
for attr in ('email', 'date', 'verified'):
value = user.get(attr, None)
if value is not None:
attributes[attr] = value
# This values must overwrite existent values
for attr in ('screen_name', 'last_name', 'first_name', 'passwords'):
value = user.get(attr, None)
if value is not None:
attributes[attr] = value
return attributes
Change fields to the schema agreedfrom eduid_am.exceptions import UserDoesNotExist
def attribute_fetcher(db, user_id):
attributes = {}
user = db.registered.find_one({'_id': user_id})
if user is None:
raise UserDoesNotExist("No user matching _id='%s'" % user_id)
else:
email = user.get('email', None)
if email:
attributes['mail'] = email
attributes['mailAliases'] = [{
'email': email,
'verified': user.get('verified', False),
}]
# This values must overwrite existent values
for attr in ('givenName', 'sn', 'displayName', 'passwords',
'date'):
value = user.get(attr, None)
if value is not None:
attributes[attr] = value
return attributes
| <commit_before>from eduid_am.exceptions import UserDoesNotExist
def attribute_fetcher(db, user_id):
attributes = {}
user = db.registered.find_one({'_id': user_id})
if user is None:
raise UserDoesNotExist("No user matching _id='%s'" % user_id)
else:
# white list of valid attributes for security reasons
for attr in ('email', 'date', 'verified'):
value = user.get(attr, None)
if value is not None:
attributes[attr] = value
# This values must overwrite existent values
for attr in ('screen_name', 'last_name', 'first_name', 'passwords'):
value = user.get(attr, None)
if value is not None:
attributes[attr] = value
return attributes
<commit_msg>Change fields to the schema agreed<commit_after>from eduid_am.exceptions import UserDoesNotExist
def attribute_fetcher(db, user_id):
attributes = {}
user = db.registered.find_one({'_id': user_id})
if user is None:
raise UserDoesNotExist("No user matching _id='%s'" % user_id)
else:
email = user.get('email', None)
if email:
attributes['mail'] = email
attributes['mailAliases'] = [{
'email': email,
'verified': user.get('verified', False),
}]
# This values must overwrite existent values
for attr in ('givenName', 'sn', 'displayName', 'passwords',
'date'):
value = user.get(attr, None)
if value is not None:
attributes[attr] = value
return attributes
|
7784186509e41c72bcf7a4ebbd9b268b49449d35 | user_clipboard/urls.py | user_clipboard/urls.py | from django.conf.urls import patterns, url
from .views import ClipboardFileAPIView, ClipboardImageAPIView
urlpatterns = patterns(
'',
url(r'^images/(?P<pk>\d+)$', ClipboardImageAPIView.as_view(), name="clipboard_images"),
url(r'^images/', ClipboardImageAPIView.as_view(), name="clipboard_images"),
url(r'^(?P<pk>\d+)$', ClipboardFileAPIView.as_view(), name="clipboard"),
url(r'^', ClipboardFileAPIView.as_view(), name="clipboard"),
)
| from django.conf.urls import url
from .views import ClipboardFileAPIView, ClipboardImageAPIView
urlpatterns = [
url(r'^images/(?P<pk>\d+)$', ClipboardImageAPIView.as_view(), name="clipboard_images"),
url(r'^images/', ClipboardImageAPIView.as_view(), name="clipboard_images"),
url(r'^(?P<pk>\d+)$', ClipboardFileAPIView.as_view(), name="clipboard"),
url(r'^', ClipboardFileAPIView.as_view(), name="clipboard"),
]
| Define urlpatterns as a pure list (don't call patterns) | Define urlpatterns as a pure list (don't call patterns) | Python | mit | MagicSolutions/django-user-clipboard,IndustriaTech/django-user-clipboard,MagicSolutions/django-user-clipboard,IndustriaTech/django-user-clipboard | from django.conf.urls import patterns, url
from .views import ClipboardFileAPIView, ClipboardImageAPIView
urlpatterns = patterns(
'',
url(r'^images/(?P<pk>\d+)$', ClipboardImageAPIView.as_view(), name="clipboard_images"),
url(r'^images/', ClipboardImageAPIView.as_view(), name="clipboard_images"),
url(r'^(?P<pk>\d+)$', ClipboardFileAPIView.as_view(), name="clipboard"),
url(r'^', ClipboardFileAPIView.as_view(), name="clipboard"),
)
Define urlpatterns as a pure list (don't call patterns) | from django.conf.urls import url
from .views import ClipboardFileAPIView, ClipboardImageAPIView
urlpatterns = [
url(r'^images/(?P<pk>\d+)$', ClipboardImageAPIView.as_view(), name="clipboard_images"),
url(r'^images/', ClipboardImageAPIView.as_view(), name="clipboard_images"),
url(r'^(?P<pk>\d+)$', ClipboardFileAPIView.as_view(), name="clipboard"),
url(r'^', ClipboardFileAPIView.as_view(), name="clipboard"),
]
| <commit_before>from django.conf.urls import patterns, url
from .views import ClipboardFileAPIView, ClipboardImageAPIView
urlpatterns = patterns(
'',
url(r'^images/(?P<pk>\d+)$', ClipboardImageAPIView.as_view(), name="clipboard_images"),
url(r'^images/', ClipboardImageAPIView.as_view(), name="clipboard_images"),
url(r'^(?P<pk>\d+)$', ClipboardFileAPIView.as_view(), name="clipboard"),
url(r'^', ClipboardFileAPIView.as_view(), name="clipboard"),
)
<commit_msg>Define urlpatterns as a pure list (don't call patterns)<commit_after> | from django.conf.urls import url
from .views import ClipboardFileAPIView, ClipboardImageAPIView
urlpatterns = [
url(r'^images/(?P<pk>\d+)$', ClipboardImageAPIView.as_view(), name="clipboard_images"),
url(r'^images/', ClipboardImageAPIView.as_view(), name="clipboard_images"),
url(r'^(?P<pk>\d+)$', ClipboardFileAPIView.as_view(), name="clipboard"),
url(r'^', ClipboardFileAPIView.as_view(), name="clipboard"),
]
| from django.conf.urls import patterns, url
from .views import ClipboardFileAPIView, ClipboardImageAPIView
urlpatterns = patterns(
'',
url(r'^images/(?P<pk>\d+)$', ClipboardImageAPIView.as_view(), name="clipboard_images"),
url(r'^images/', ClipboardImageAPIView.as_view(), name="clipboard_images"),
url(r'^(?P<pk>\d+)$', ClipboardFileAPIView.as_view(), name="clipboard"),
url(r'^', ClipboardFileAPIView.as_view(), name="clipboard"),
)
Define urlpatterns as a pure list (don't call patterns)from django.conf.urls import url
from .views import ClipboardFileAPIView, ClipboardImageAPIView
urlpatterns = [
url(r'^images/(?P<pk>\d+)$', ClipboardImageAPIView.as_view(), name="clipboard_images"),
url(r'^images/', ClipboardImageAPIView.as_view(), name="clipboard_images"),
url(r'^(?P<pk>\d+)$', ClipboardFileAPIView.as_view(), name="clipboard"),
url(r'^', ClipboardFileAPIView.as_view(), name="clipboard"),
]
| <commit_before>from django.conf.urls import patterns, url
from .views import ClipboardFileAPIView, ClipboardImageAPIView
urlpatterns = patterns(
'',
url(r'^images/(?P<pk>\d+)$', ClipboardImageAPIView.as_view(), name="clipboard_images"),
url(r'^images/', ClipboardImageAPIView.as_view(), name="clipboard_images"),
url(r'^(?P<pk>\d+)$', ClipboardFileAPIView.as_view(), name="clipboard"),
url(r'^', ClipboardFileAPIView.as_view(), name="clipboard"),
)
<commit_msg>Define urlpatterns as a pure list (don't call patterns)<commit_after>from django.conf.urls import url
from .views import ClipboardFileAPIView, ClipboardImageAPIView
urlpatterns = [
url(r'^images/(?P<pk>\d+)$', ClipboardImageAPIView.as_view(), name="clipboard_images"),
url(r'^images/', ClipboardImageAPIView.as_view(), name="clipboard_images"),
url(r'^(?P<pk>\d+)$', ClipboardFileAPIView.as_view(), name="clipboard"),
url(r'^', ClipboardFileAPIView.as_view(), name="clipboard"),
]
|
808fdc4351254c8f5b32d5997803562091121044 | cinderella/cinderella/settings/production.py | cinderella/cinderella/settings/production.py | from .base import *
DEBUG = False
ALLOWED_HOSTS = ['188.226.249.33', 'cinderella.li']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ['DB_NAME'],
'USER': os.environ['DB_USER'],
'PASSWORD': os.environ['DB_PASSWORD'],
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
| from .base import *
DEBUG = False
ALLOWED_HOSTS = ['cinderella.li']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ['DB_NAME'],
'USER': os.environ['DB_USER'],
'PASSWORD': os.environ['DB_PASSWORD'],
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
| Remove IP from allowed hosts | Remove IP from allowed hosts
| Python | mit | jasisz/cinderella,jasisz/cinderella | from .base import *
DEBUG = False
ALLOWED_HOSTS = ['188.226.249.33', 'cinderella.li']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ['DB_NAME'],
'USER': os.environ['DB_USER'],
'PASSWORD': os.environ['DB_PASSWORD'],
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
Remove IP from allowed hosts | from .base import *
DEBUG = False
ALLOWED_HOSTS = ['cinderella.li']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ['DB_NAME'],
'USER': os.environ['DB_USER'],
'PASSWORD': os.environ['DB_PASSWORD'],
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
| <commit_before>from .base import *
DEBUG = False
ALLOWED_HOSTS = ['188.226.249.33', 'cinderella.li']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ['DB_NAME'],
'USER': os.environ['DB_USER'],
'PASSWORD': os.environ['DB_PASSWORD'],
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
<commit_msg>Remove IP from allowed hosts<commit_after> | from .base import *
DEBUG = False
ALLOWED_HOSTS = ['cinderella.li']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ['DB_NAME'],
'USER': os.environ['DB_USER'],
'PASSWORD': os.environ['DB_PASSWORD'],
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
| from .base import *
DEBUG = False
ALLOWED_HOSTS = ['188.226.249.33', 'cinderella.li']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ['DB_NAME'],
'USER': os.environ['DB_USER'],
'PASSWORD': os.environ['DB_PASSWORD'],
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
Remove IP from allowed hostsfrom .base import *
DEBUG = False
ALLOWED_HOSTS = ['cinderella.li']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ['DB_NAME'],
'USER': os.environ['DB_USER'],
'PASSWORD': os.environ['DB_PASSWORD'],
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
| <commit_before>from .base import *
DEBUG = False
ALLOWED_HOSTS = ['188.226.249.33', 'cinderella.li']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ['DB_NAME'],
'USER': os.environ['DB_USER'],
'PASSWORD': os.environ['DB_PASSWORD'],
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
<commit_msg>Remove IP from allowed hosts<commit_after>from .base import *
DEBUG = False
ALLOWED_HOSTS = ['cinderella.li']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ['DB_NAME'],
'USER': os.environ['DB_USER'],
'PASSWORD': os.environ['DB_PASSWORD'],
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
|
a56707d815271088c2c19f0c2c415d611886e859 | db/__init__.py | db/__init__.py | import categories # nopep8
import plugins # nopep8
import submitted_plugins # nopep8
import tags # nopep8
| import categories # NOQA: F401
import plugins # NOQA: F401
import submitted_plugins # NOQA: F401
import tags # NOQA: F401
| Update nopep8 instructions to NOQA | Update nopep8 instructions to NOQA
`flake8` seems to have dropped support for `# nopep8` in the 3.x
versions (confirmed by testing with latest and `<3`). This causes our
style checking tests to fail.
Replace these with `# NOQA` and pin them to the specific error we care
about here (F401 unused import).
| Python | mit | jonafato/vim-awesome,vim-awesome/vim-awesome,jonafato/vim-awesome,vim-awesome/vim-awesome,divad12/vim-awesome,jonafato/vim-awesome,vim-awesome/vim-awesome,divad12/vim-awesome,jonafato/vim-awesome,divad12/vim-awesome,vim-awesome/vim-awesome,divad12/vim-awesome,vim-awesome/vim-awesome | import categories # nopep8
import plugins # nopep8
import submitted_plugins # nopep8
import tags # nopep8
Update nopep8 instructions to NOQA
`flake8` seems to have dropped support for `# nopep8` in the 3.x
versions (confirmed by testing with latest and `<3`). This causes our
style checking tests to fail.
Replace these with `# NOQA` and pin them to the specific error we care
about here (F401 unused import). | import categories # NOQA: F401
import plugins # NOQA: F401
import submitted_plugins # NOQA: F401
import tags # NOQA: F401
| <commit_before>import categories # nopep8
import plugins # nopep8
import submitted_plugins # nopep8
import tags # nopep8
<commit_msg>Update nopep8 instructions to NOQA
`flake8` seems to have dropped support for `# nopep8` in the 3.x
versions (confirmed by testing with latest and `<3`). This causes our
style checking tests to fail.
Replace these with `# NOQA` and pin them to the specific error we care
about here (F401 unused import).<commit_after> | import categories # NOQA: F401
import plugins # NOQA: F401
import submitted_plugins # NOQA: F401
import tags # NOQA: F401
| import categories # nopep8
import plugins # nopep8
import submitted_plugins # nopep8
import tags # nopep8
Update nopep8 instructions to NOQA
`flake8` seems to have dropped support for `# nopep8` in the 3.x
versions (confirmed by testing with latest and `<3`). This causes our
style checking tests to fail.
Replace these with `# NOQA` and pin them to the specific error we care
about here (F401 unused import).import categories # NOQA: F401
import plugins # NOQA: F401
import submitted_plugins # NOQA: F401
import tags # NOQA: F401
| <commit_before>import categories # nopep8
import plugins # nopep8
import submitted_plugins # nopep8
import tags # nopep8
<commit_msg>Update nopep8 instructions to NOQA
`flake8` seems to have dropped support for `# nopep8` in the 3.x
versions (confirmed by testing with latest and `<3`). This causes our
style checking tests to fail.
Replace these with `# NOQA` and pin them to the specific error we care
about here (F401 unused import).<commit_after>import categories # NOQA: F401
import plugins # NOQA: F401
import submitted_plugins # NOQA: F401
import tags # NOQA: F401
|
e33a68f14a13c0340b2dfcbb13931d2185735951 | scripts/nanopolish_makerange.py | scripts/nanopolish_makerange.py | from __future__ import print_function
import sys
import argparse
from Bio import SeqIO
parser = argparse.ArgumentParser(description='Partition a genome into a set of overlapping segments')
parser.add_argument('--segment-length', type=int, default=50000)
parser.add_argument('--overlap-length', type=int, default=200)
args, extra = parser.parse_known_args()
if len(extra) != 1:
sys.stderr.write("Error: a genome file is expected\n")
filename = extra[0]
recs = [ (rec.name, len(rec.seq)) for rec in SeqIO.parse(open(filename), "fasta")]
SEGMENT_LENGTH = args.segment_length
OVERLAP_LENGTH = args.overlap_length
MIN_SEGMENT_LENGTH = 5 * OVERLAP_LENGTH
for name, length in recs:
n_segments = (length / SEGMENT_LENGTH) + 1
start = 0
while start < length:
end = start + SEGMENT_LENGTH
# If this segment will end near the end of the contig, extend it to end
if length - end < MIN_SEGMENT_LENGTH:
print("%s:%d-%d" % (name, start, length - 1))
start = length
else:
print("%s:%d-%d" % (name, start, end + OVERLAP_LENGTH))
start = end
| from __future__ import print_function
import sys
import argparse
from Bio.SeqIO.FastaIO import SimpleFastaParser
parser = argparse.ArgumentParser(description='Partition a genome into a set of overlapping segments')
parser.add_argument('--segment-length', type=int, default=50000)
parser.add_argument('--overlap-length', type=int, default=200)
args, extra = parser.parse_known_args()
if len(extra) != 1:
sys.stderr.write("Error: a genome file is expected\n")
filename = extra[0]
with open(filename) as handle:
recs = [(title.split(None, 1)[0], len(seq))
for title, seq in SimpleFastaParser(handle)]
SEGMENT_LENGTH = args.segment_length
OVERLAP_LENGTH = args.overlap_length
MIN_SEGMENT_LENGTH = 5 * OVERLAP_LENGTH
for name, length in recs:
n_segments = (length / SEGMENT_LENGTH) + 1
start = 0
while start < length:
end = start + SEGMENT_LENGTH
# If this segment will end near the end of the contig, extend it to end
if length - end < MIN_SEGMENT_LENGTH:
print("%s:%d-%d" % (name, start, length - 1))
start = length
else:
print("%s:%d-%d" % (name, start, end + OVERLAP_LENGTH))
start = end
| Use Biopython's string based FASTA parser | Use Biopython's string based FASTA parser
This was introduced in Biopython 1.61 back in February 2013,
so the dependencies shouldn't matter.
You could go further here and use a generator expression
over a list comprehension? | Python | mit | jts/nanopolish,jts/nanopolish,jts/nanopolish,jts/nanopolish,jts/nanopolish | from __future__ import print_function
import sys
import argparse
from Bio import SeqIO
parser = argparse.ArgumentParser(description='Partition a genome into a set of overlapping segments')
parser.add_argument('--segment-length', type=int, default=50000)
parser.add_argument('--overlap-length', type=int, default=200)
args, extra = parser.parse_known_args()
if len(extra) != 1:
sys.stderr.write("Error: a genome file is expected\n")
filename = extra[0]
recs = [ (rec.name, len(rec.seq)) for rec in SeqIO.parse(open(filename), "fasta")]
SEGMENT_LENGTH = args.segment_length
OVERLAP_LENGTH = args.overlap_length
MIN_SEGMENT_LENGTH = 5 * OVERLAP_LENGTH
for name, length in recs:
n_segments = (length / SEGMENT_LENGTH) + 1
start = 0
while start < length:
end = start + SEGMENT_LENGTH
# If this segment will end near the end of the contig, extend it to end
if length - end < MIN_SEGMENT_LENGTH:
print("%s:%d-%d" % (name, start, length - 1))
start = length
else:
print("%s:%d-%d" % (name, start, end + OVERLAP_LENGTH))
start = end
Use Biopython's string based FASTA parser
This was introduced in Biopython 1.61 back in February 2013,
so the dependencies shouldn't matter.
You could go further here and use a generator expression
over a list comprehension? | from __future__ import print_function
import sys
import argparse
from Bio.SeqIO.FastaIO import SimpleFastaParser
parser = argparse.ArgumentParser(description='Partition a genome into a set of overlapping segments')
parser.add_argument('--segment-length', type=int, default=50000)
parser.add_argument('--overlap-length', type=int, default=200)
args, extra = parser.parse_known_args()
if len(extra) != 1:
sys.stderr.write("Error: a genome file is expected\n")
filename = extra[0]
with open(filename) as handle:
recs = [(title.split(None, 1)[0], len(seq))
for title, seq in SimpleFastaParser(handle)]
SEGMENT_LENGTH = args.segment_length
OVERLAP_LENGTH = args.overlap_length
MIN_SEGMENT_LENGTH = 5 * OVERLAP_LENGTH
for name, length in recs:
n_segments = (length / SEGMENT_LENGTH) + 1
start = 0
while start < length:
end = start + SEGMENT_LENGTH
# If this segment will end near the end of the contig, extend it to end
if length - end < MIN_SEGMENT_LENGTH:
print("%s:%d-%d" % (name, start, length - 1))
start = length
else:
print("%s:%d-%d" % (name, start, end + OVERLAP_LENGTH))
start = end
| <commit_before>from __future__ import print_function
import sys
import argparse
from Bio import SeqIO
parser = argparse.ArgumentParser(description='Partition a genome into a set of overlapping segments')
parser.add_argument('--segment-length', type=int, default=50000)
parser.add_argument('--overlap-length', type=int, default=200)
args, extra = parser.parse_known_args()
if len(extra) != 1:
sys.stderr.write("Error: a genome file is expected\n")
filename = extra[0]
recs = [ (rec.name, len(rec.seq)) for rec in SeqIO.parse(open(filename), "fasta")]
SEGMENT_LENGTH = args.segment_length
OVERLAP_LENGTH = args.overlap_length
MIN_SEGMENT_LENGTH = 5 * OVERLAP_LENGTH
for name, length in recs:
n_segments = (length / SEGMENT_LENGTH) + 1
start = 0
while start < length:
end = start + SEGMENT_LENGTH
# If this segment will end near the end of the contig, extend it to end
if length - end < MIN_SEGMENT_LENGTH:
print("%s:%d-%d" % (name, start, length - 1))
start = length
else:
print("%s:%d-%d" % (name, start, end + OVERLAP_LENGTH))
start = end
<commit_msg>Use Biopython's string based FASTA parser
This was introduced in Biopython 1.61 back in February 2013,
so the dependencies shouldn't matter.
You could go further here and use a generator expression
over a list comprehension?<commit_after> | from __future__ import print_function
import sys
import argparse
from Bio.SeqIO.FastaIO import SimpleFastaParser
parser = argparse.ArgumentParser(description='Partition a genome into a set of overlapping segments')
parser.add_argument('--segment-length', type=int, default=50000)
parser.add_argument('--overlap-length', type=int, default=200)
args, extra = parser.parse_known_args()
if len(extra) != 1:
sys.stderr.write("Error: a genome file is expected\n")
filename = extra[0]
with open(filename) as handle:
recs = [(title.split(None, 1)[0], len(seq))
for title, seq in SimpleFastaParser(handle)]
SEGMENT_LENGTH = args.segment_length
OVERLAP_LENGTH = args.overlap_length
MIN_SEGMENT_LENGTH = 5 * OVERLAP_LENGTH
for name, length in recs:
n_segments = (length / SEGMENT_LENGTH) + 1
start = 0
while start < length:
end = start + SEGMENT_LENGTH
# If this segment will end near the end of the contig, extend it to end
if length - end < MIN_SEGMENT_LENGTH:
print("%s:%d-%d" % (name, start, length - 1))
start = length
else:
print("%s:%d-%d" % (name, start, end + OVERLAP_LENGTH))
start = end
| from __future__ import print_function
import sys
import argparse
from Bio import SeqIO
parser = argparse.ArgumentParser(description='Partition a genome into a set of overlapping segments')
parser.add_argument('--segment-length', type=int, default=50000)
parser.add_argument('--overlap-length', type=int, default=200)
args, extra = parser.parse_known_args()
if len(extra) != 1:
sys.stderr.write("Error: a genome file is expected\n")
filename = extra[0]
recs = [ (rec.name, len(rec.seq)) for rec in SeqIO.parse(open(filename), "fasta")]
SEGMENT_LENGTH = args.segment_length
OVERLAP_LENGTH = args.overlap_length
MIN_SEGMENT_LENGTH = 5 * OVERLAP_LENGTH
for name, length in recs:
n_segments = (length / SEGMENT_LENGTH) + 1
start = 0
while start < length:
end = start + SEGMENT_LENGTH
# If this segment will end near the end of the contig, extend it to end
if length - end < MIN_SEGMENT_LENGTH:
print("%s:%d-%d" % (name, start, length - 1))
start = length
else:
print("%s:%d-%d" % (name, start, end + OVERLAP_LENGTH))
start = end
Use Biopython's string based FASTA parser
This was introduced in Biopython 1.61 back in February 2013,
so the dependencies shouldn't matter.
You could go further here and use a generator expression
over a list comprehension?from __future__ import print_function
import sys
import argparse
from Bio.SeqIO.FastaIO import SimpleFastaParser
parser = argparse.ArgumentParser(description='Partition a genome into a set of overlapping segments')
parser.add_argument('--segment-length', type=int, default=50000)
parser.add_argument('--overlap-length', type=int, default=200)
args, extra = parser.parse_known_args()
if len(extra) != 1:
sys.stderr.write("Error: a genome file is expected\n")
filename = extra[0]
with open(filename) as handle:
recs = [(title.split(None, 1)[0], len(seq))
for title, seq in SimpleFastaParser(handle)]
SEGMENT_LENGTH = args.segment_length
OVERLAP_LENGTH = args.overlap_length
MIN_SEGMENT_LENGTH = 5 * OVERLAP_LENGTH
for name, length in recs:
n_segments = (length / SEGMENT_LENGTH) + 1
start = 0
while start < length:
end = start + SEGMENT_LENGTH
# If this segment will end near the end of the contig, extend it to end
if length - end < MIN_SEGMENT_LENGTH:
print("%s:%d-%d" % (name, start, length - 1))
start = length
else:
print("%s:%d-%d" % (name, start, end + OVERLAP_LENGTH))
start = end
| <commit_before>from __future__ import print_function
import sys
import argparse
from Bio import SeqIO
parser = argparse.ArgumentParser(description='Partition a genome into a set of overlapping segments')
parser.add_argument('--segment-length', type=int, default=50000)
parser.add_argument('--overlap-length', type=int, default=200)
args, extra = parser.parse_known_args()
if len(extra) != 1:
sys.stderr.write("Error: a genome file is expected\n")
filename = extra[0]
recs = [ (rec.name, len(rec.seq)) for rec in SeqIO.parse(open(filename), "fasta")]
SEGMENT_LENGTH = args.segment_length
OVERLAP_LENGTH = args.overlap_length
MIN_SEGMENT_LENGTH = 5 * OVERLAP_LENGTH
for name, length in recs:
n_segments = (length / SEGMENT_LENGTH) + 1
start = 0
while start < length:
end = start + SEGMENT_LENGTH
# If this segment will end near the end of the contig, extend it to end
if length - end < MIN_SEGMENT_LENGTH:
print("%s:%d-%d" % (name, start, length - 1))
start = length
else:
print("%s:%d-%d" % (name, start, end + OVERLAP_LENGTH))
start = end
<commit_msg>Use Biopython's string based FASTA parser
This was introduced in Biopython 1.61 back in February 2013,
so the dependencies shouldn't matter.
You could go further here and use a generator expression
over a list comprehension?<commit_after>from __future__ import print_function
import sys
import argparse
from Bio.SeqIO.FastaIO import SimpleFastaParser
parser = argparse.ArgumentParser(description='Partition a genome into a set of overlapping segments')
parser.add_argument('--segment-length', type=int, default=50000)
parser.add_argument('--overlap-length', type=int, default=200)
args, extra = parser.parse_known_args()
if len(extra) != 1:
sys.stderr.write("Error: a genome file is expected\n")
filename = extra[0]
with open(filename) as handle:
recs = [(title.split(None, 1)[0], len(seq))
for title, seq in SimpleFastaParser(handle)]
SEGMENT_LENGTH = args.segment_length
OVERLAP_LENGTH = args.overlap_length
MIN_SEGMENT_LENGTH = 5 * OVERLAP_LENGTH
for name, length in recs:
n_segments = (length / SEGMENT_LENGTH) + 1
start = 0
while start < length:
end = start + SEGMENT_LENGTH
# If this segment will end near the end of the contig, extend it to end
if length - end < MIN_SEGMENT_LENGTH:
print("%s:%d-%d" % (name, start, length - 1))
start = length
else:
print("%s:%d-%d" % (name, start, end + OVERLAP_LENGTH))
start = end
|
945209957a26d8fc7673795b5bfc5c233ed00e0e | uchicagohvz/game/serializers.py | uchicagohvz/game/serializers.py | from rest_framework import serializers
from uchicagohvz.game.models import *
class KillSerializer(serializers.ModelSerializer):
class Meta:
model = Kill
fields = ('id', 'killer', 'victim', 'location', 'date', 'points')
killer = serializers.SerializerMethodField('get_killer')
victim = serializers.SerializerMethodField('get_victim')
location = serializers.SerializerMethodField('get_location')
def get_killer(self, obj):
return obj.killer.display_name
def get_victim(self, obj):
return obj.victim.display_name
def get_location(self, obj):
if not (obj.lat and obj.lng):
return None
return (obj.lat, obj.lng)
| from rest_framework import serializers
from uchicagohvz.game.models import *
class KillSerializer(serializers.ModelSerializer):
class Meta:
model = Kill
fields = ('id', 'killer', 'victim', 'location', 'date', 'points')
killer = serializers.SerializerMethodField()
victim = serializers.SerializerMethodField()
location = serializers.SerializerMethodField()
def get_killer(self, obj):
return obj.killer.display_name
def get_victim(self, obj):
return obj.victim.display_name
def get_location(self, obj):
if not (obj.lat and obj.lng):
return None
return (obj.lat, obj.lng)
| Remove default method_name in SerializerMethodField | Remove default method_name in SerializerMethodField | Python | mit | kz26/uchicago-hvz,kz26/uchicago-hvz,kz26/uchicago-hvz | from rest_framework import serializers
from uchicagohvz.game.models import *
class KillSerializer(serializers.ModelSerializer):
class Meta:
model = Kill
fields = ('id', 'killer', 'victim', 'location', 'date', 'points')
killer = serializers.SerializerMethodField('get_killer')
victim = serializers.SerializerMethodField('get_victim')
location = serializers.SerializerMethodField('get_location')
def get_killer(self, obj):
return obj.killer.display_name
def get_victim(self, obj):
return obj.victim.display_name
def get_location(self, obj):
if not (obj.lat and obj.lng):
return None
return (obj.lat, obj.lng)
Remove default method_name in SerializerMethodField | from rest_framework import serializers
from uchicagohvz.game.models import *
class KillSerializer(serializers.ModelSerializer):
class Meta:
model = Kill
fields = ('id', 'killer', 'victim', 'location', 'date', 'points')
killer = serializers.SerializerMethodField()
victim = serializers.SerializerMethodField()
location = serializers.SerializerMethodField()
def get_killer(self, obj):
return obj.killer.display_name
def get_victim(self, obj):
return obj.victim.display_name
def get_location(self, obj):
if not (obj.lat and obj.lng):
return None
return (obj.lat, obj.lng)
| <commit_before>from rest_framework import serializers
from uchicagohvz.game.models import *
class KillSerializer(serializers.ModelSerializer):
class Meta:
model = Kill
fields = ('id', 'killer', 'victim', 'location', 'date', 'points')
killer = serializers.SerializerMethodField('get_killer')
victim = serializers.SerializerMethodField('get_victim')
location = serializers.SerializerMethodField('get_location')
def get_killer(self, obj):
return obj.killer.display_name
def get_victim(self, obj):
return obj.victim.display_name
def get_location(self, obj):
if not (obj.lat and obj.lng):
return None
return (obj.lat, obj.lng)
<commit_msg>Remove default method_name in SerializerMethodField<commit_after> | from rest_framework import serializers
from uchicagohvz.game.models import *
class KillSerializer(serializers.ModelSerializer):
class Meta:
model = Kill
fields = ('id', 'killer', 'victim', 'location', 'date', 'points')
killer = serializers.SerializerMethodField()
victim = serializers.SerializerMethodField()
location = serializers.SerializerMethodField()
def get_killer(self, obj):
return obj.killer.display_name
def get_victim(self, obj):
return obj.victim.display_name
def get_location(self, obj):
if not (obj.lat and obj.lng):
return None
return (obj.lat, obj.lng)
| from rest_framework import serializers
from uchicagohvz.game.models import *
class KillSerializer(serializers.ModelSerializer):
class Meta:
model = Kill
fields = ('id', 'killer', 'victim', 'location', 'date', 'points')
killer = serializers.SerializerMethodField('get_killer')
victim = serializers.SerializerMethodField('get_victim')
location = serializers.SerializerMethodField('get_location')
def get_killer(self, obj):
return obj.killer.display_name
def get_victim(self, obj):
return obj.victim.display_name
def get_location(self, obj):
if not (obj.lat and obj.lng):
return None
return (obj.lat, obj.lng)
Remove default method_name in SerializerMethodFieldfrom rest_framework import serializers
from uchicagohvz.game.models import *
class KillSerializer(serializers.ModelSerializer):
class Meta:
model = Kill
fields = ('id', 'killer', 'victim', 'location', 'date', 'points')
killer = serializers.SerializerMethodField()
victim = serializers.SerializerMethodField()
location = serializers.SerializerMethodField()
def get_killer(self, obj):
return obj.killer.display_name
def get_victim(self, obj):
return obj.victim.display_name
def get_location(self, obj):
if not (obj.lat and obj.lng):
return None
return (obj.lat, obj.lng)
| <commit_before>from rest_framework import serializers
from uchicagohvz.game.models import *
class KillSerializer(serializers.ModelSerializer):
class Meta:
model = Kill
fields = ('id', 'killer', 'victim', 'location', 'date', 'points')
killer = serializers.SerializerMethodField('get_killer')
victim = serializers.SerializerMethodField('get_victim')
location = serializers.SerializerMethodField('get_location')
def get_killer(self, obj):
return obj.killer.display_name
def get_victim(self, obj):
return obj.victim.display_name
def get_location(self, obj):
if not (obj.lat and obj.lng):
return None
return (obj.lat, obj.lng)
<commit_msg>Remove default method_name in SerializerMethodField<commit_after>from rest_framework import serializers
from uchicagohvz.game.models import *
class KillSerializer(serializers.ModelSerializer):
class Meta:
model = Kill
fields = ('id', 'killer', 'victim', 'location', 'date', 'points')
killer = serializers.SerializerMethodField()
victim = serializers.SerializerMethodField()
location = serializers.SerializerMethodField()
def get_killer(self, obj):
return obj.killer.display_name
def get_victim(self, obj):
return obj.victim.display_name
def get_location(self, obj):
if not (obj.lat and obj.lng):
return None
return (obj.lat, obj.lng)
|
9c945162dfb60481c9f5d39c5e42617b030263a9 | mailgun/models.py | mailgun/models.py | import api
import db
from utils import parse_timestamp
import hashlib
import json
def download_logs():
""" Download mailgun logs and store them in the database """
logs = []
skip = 0
# Fetch all unsaved logs and add them to a LIFO queue
while True:
print("fecthing logs starting at {}".format(skip))
for log in api.get_logs(limit=100, skip=skip)['items']:
log_data = json.dumps(log)
log_hash = hashlib.sha256(log_data).hexdigest()
if db.MailgunLog.objects.filter(log_hash=log_hash).exists():
break
else:
logs[:0] = [(log_hash, log_data, parse_timestamp(log['date_created']]
else:
break
skip += 100
# take items from LIFO queue and save to db
for log_hash, data, timestamp in logs:
db.MailgunLog(
log_hash=log_hash,
data=data,
timestamp=timestamp
).save()
| import api
import db
from utils import parse_timestamp
from django.db import transaction
from collections import OrderedDict
import hashlib
import json
@transaction.commit_manually
def download_logs():
""" Download mailgun logs and store them in the database """
# use ordered dict to protect against new logs arriving while downloading logs
logs = OrderedDict()
skip = 0
# Fetch all unsaved logs and add them to a LIFO queue
fetch_more = True
while fetch_more:
print("fecthing logs skip={}".format(skip))
logs_tmp = api.get_logs(limit=1000, skip=skip)['items']
if len(logs_tmp) == 0:
break
for log in logs_tmp:
log_data = json.dumps(log)
log_hash = hashlib.sha256(log_data).hexdigest()
if db.MailgunLog.objects.filter(log_hash=log_hash).exists():
fetch_more = False
break
else:
logs[log_hash] = (log_hash, log_data, parse_timestamp(log['created_at']))
skip += 1000
# take items from LIFO queue and save to db
print("Saving {0} logs to database".format(len(logs)))
for i, (log_hash, data, timestamp) in enumerate(logs.values()):
db.MailgunLog(
log_hash=log_hash,
data=data,
timestamp=timestamp
).save()
if (i+1) % 100 == 0:
transaction.commit()
transaction.commit()
| Handle transactions manually when saving downloaded logs | Handle transactions manually when saving downloaded logs
| Python | mit | p2pu/mechanical-mooc,p2pu/mechanical-mooc,p2pu/mechanical-mooc,p2pu/mechanical-mooc | import api
import db
from utils import parse_timestamp
import hashlib
import json
def download_logs():
""" Download mailgun logs and store them in the database """
logs = []
skip = 0
# Fetch all unsaved logs and add them to a LIFO queue
while True:
print("fecthing logs starting at {}".format(skip))
for log in api.get_logs(limit=100, skip=skip)['items']:
log_data = json.dumps(log)
log_hash = hashlib.sha256(log_data).hexdigest()
if db.MailgunLog.objects.filter(log_hash=log_hash).exists():
break
else:
logs[:0] = [(log_hash, log_data, parse_timestamp(log['date_created']]
else:
break
skip += 100
# take items from LIFO queue and save to db
for log_hash, data, timestamp in logs:
db.MailgunLog(
log_hash=log_hash,
data=data,
timestamp=timestamp
).save()
Handle transactions manually when saving downloaded logs | import api
import db
from utils import parse_timestamp
from django.db import transaction
from collections import OrderedDict
import hashlib
import json
@transaction.commit_manually
def download_logs():
""" Download mailgun logs and store them in the database """
# use ordered dict to protect against new logs arriving while downloading logs
logs = OrderedDict()
skip = 0
# Fetch all unsaved logs and add them to a LIFO queue
fetch_more = True
while fetch_more:
print("fecthing logs skip={}".format(skip))
logs_tmp = api.get_logs(limit=1000, skip=skip)['items']
if len(logs_tmp) == 0:
break
for log in logs_tmp:
log_data = json.dumps(log)
log_hash = hashlib.sha256(log_data).hexdigest()
if db.MailgunLog.objects.filter(log_hash=log_hash).exists():
fetch_more = False
break
else:
logs[log_hash] = (log_hash, log_data, parse_timestamp(log['created_at']))
skip += 1000
# take items from LIFO queue and save to db
print("Saving {0} logs to database".format(len(logs)))
for i, (log_hash, data, timestamp) in enumerate(logs.values()):
db.MailgunLog(
log_hash=log_hash,
data=data,
timestamp=timestamp
).save()
if (i+1) % 100 == 0:
transaction.commit()
transaction.commit()
| <commit_before>import api
import db
from utils import parse_timestamp
import hashlib
import json
def download_logs():
""" Download mailgun logs and store them in the database """
logs = []
skip = 0
# Fetch all unsaved logs and add them to a LIFO queue
while True:
print("fecthing logs starting at {}".format(skip))
for log in api.get_logs(limit=100, skip=skip)['items']:
log_data = json.dumps(log)
log_hash = hashlib.sha256(log_data).hexdigest()
if db.MailgunLog.objects.filter(log_hash=log_hash).exists():
break
else:
logs[:0] = [(log_hash, log_data, parse_timestamp(log['date_created']]
else:
break
skip += 100
# take items from LIFO queue and save to db
for log_hash, data, timestamp in logs:
db.MailgunLog(
log_hash=log_hash,
data=data,
timestamp=timestamp
).save()
<commit_msg>Handle transactions manually when saving downloaded logs<commit_after> | import api
import db
from utils import parse_timestamp
from django.db import transaction
from collections import OrderedDict
import hashlib
import json
@transaction.commit_manually
def download_logs():
""" Download mailgun logs and store them in the database """
# use ordered dict to protect against new logs arriving while downloading logs
logs = OrderedDict()
skip = 0
# Fetch all unsaved logs and add them to a LIFO queue
fetch_more = True
while fetch_more:
print("fecthing logs skip={}".format(skip))
logs_tmp = api.get_logs(limit=1000, skip=skip)['items']
if len(logs_tmp) == 0:
break
for log in logs_tmp:
log_data = json.dumps(log)
log_hash = hashlib.sha256(log_data).hexdigest()
if db.MailgunLog.objects.filter(log_hash=log_hash).exists():
fetch_more = False
break
else:
logs[log_hash] = (log_hash, log_data, parse_timestamp(log['created_at']))
skip += 1000
# take items from LIFO queue and save to db
print("Saving {0} logs to database".format(len(logs)))
for i, (log_hash, data, timestamp) in enumerate(logs.values()):
db.MailgunLog(
log_hash=log_hash,
data=data,
timestamp=timestamp
).save()
if (i+1) % 100 == 0:
transaction.commit()
transaction.commit()
| import api
import db
from utils import parse_timestamp
import hashlib
import json
def download_logs():
""" Download mailgun logs and store them in the database """
logs = []
skip = 0
# Fetch all unsaved logs and add them to a LIFO queue
while True:
print("fecthing logs starting at {}".format(skip))
for log in api.get_logs(limit=100, skip=skip)['items']:
log_data = json.dumps(log)
log_hash = hashlib.sha256(log_data).hexdigest()
if db.MailgunLog.objects.filter(log_hash=log_hash).exists():
break
else:
logs[:0] = [(log_hash, log_data, parse_timestamp(log['date_created']]
else:
break
skip += 100
# take items from LIFO queue and save to db
for log_hash, data, timestamp in logs:
db.MailgunLog(
log_hash=log_hash,
data=data,
timestamp=timestamp
).save()
Handle transactions manually when saving downloaded logsimport api
import db
from utils import parse_timestamp
from django.db import transaction
from collections import OrderedDict
import hashlib
import json
@transaction.commit_manually
def download_logs():
""" Download mailgun logs and store them in the database """
# use ordered dict to protect against new logs arriving while downloading logs
logs = OrderedDict()
skip = 0
# Fetch all unsaved logs and add them to a LIFO queue
fetch_more = True
while fetch_more:
print("fecthing logs skip={}".format(skip))
logs_tmp = api.get_logs(limit=1000, skip=skip)['items']
if len(logs_tmp) == 0:
break
for log in logs_tmp:
log_data = json.dumps(log)
log_hash = hashlib.sha256(log_data).hexdigest()
if db.MailgunLog.objects.filter(log_hash=log_hash).exists():
fetch_more = False
break
else:
logs[log_hash] = (log_hash, log_data, parse_timestamp(log['created_at']))
skip += 1000
# take items from LIFO queue and save to db
print("Saving {0} logs to database".format(len(logs)))
for i, (log_hash, data, timestamp) in enumerate(logs.values()):
db.MailgunLog(
log_hash=log_hash,
data=data,
timestamp=timestamp
).save()
if (i+1) % 100 == 0:
transaction.commit()
transaction.commit()
| <commit_before>import api
import db
from utils import parse_timestamp
import hashlib
import json
def download_logs():
""" Download mailgun logs and store them in the database """
logs = []
skip = 0
# Fetch all unsaved logs and add them to a LIFO queue
while True:
print("fecthing logs starting at {}".format(skip))
for log in api.get_logs(limit=100, skip=skip)['items']:
log_data = json.dumps(log)
log_hash = hashlib.sha256(log_data).hexdigest()
if db.MailgunLog.objects.filter(log_hash=log_hash).exists():
break
else:
logs[:0] = [(log_hash, log_data, parse_timestamp(log['date_created']]
else:
break
skip += 100
# take items from LIFO queue and save to db
for log_hash, data, timestamp in logs:
db.MailgunLog(
log_hash=log_hash,
data=data,
timestamp=timestamp
).save()
<commit_msg>Handle transactions manually when saving downloaded logs<commit_after>import api
import db
from utils import parse_timestamp
from django.db import transaction
from collections import OrderedDict
import hashlib
import json
@transaction.commit_manually
def download_logs():
""" Download mailgun logs and store them in the database """
# use ordered dict to protect against new logs arriving while downloading logs
logs = OrderedDict()
skip = 0
# Fetch all unsaved logs and add them to a LIFO queue
fetch_more = True
while fetch_more:
print("fecthing logs skip={}".format(skip))
logs_tmp = api.get_logs(limit=1000, skip=skip)['items']
if len(logs_tmp) == 0:
break
for log in logs_tmp:
log_data = json.dumps(log)
log_hash = hashlib.sha256(log_data).hexdigest()
if db.MailgunLog.objects.filter(log_hash=log_hash).exists():
fetch_more = False
break
else:
logs[log_hash] = (log_hash, log_data, parse_timestamp(log['created_at']))
skip += 1000
# take items from LIFO queue and save to db
print("Saving {0} logs to database".format(len(logs)))
for i, (log_hash, data, timestamp) in enumerate(logs.values()):
db.MailgunLog(
log_hash=log_hash,
data=data,
timestamp=timestamp
).save()
if (i+1) % 100 == 0:
transaction.commit()
transaction.commit()
|
1c4fbca7ce0b1ad16159f62e1485a3485f1878bb | oidstub.py | oidstub.py | """Stand-in module for those without the speed-enhanced tuple-OID implementation"""
def OID( value ):
"""Null function to pretend to be oid.OID"""
return str(value)
| """Stand-in module for those without the speed-enhanced tuple-OID implementation"""
USE_STRING_OIDS = True
def OID( value ):
"""Null function to pretend to be oid.OID"""
return str(value)
| Declare use of string OIDs | Declare use of string OIDs
| Python | bsd-3-clause | mmattice/TwistedSNMP | """Stand-in module for those without the speed-enhanced tuple-OID implementation"""
def OID( value ):
"""Null function to pretend to be oid.OID"""
return str(value)
Declare use of string OIDs | """Stand-in module for those without the speed-enhanced tuple-OID implementation"""
USE_STRING_OIDS = True
def OID( value ):
"""Null function to pretend to be oid.OID"""
return str(value)
| <commit_before>"""Stand-in module for those without the speed-enhanced tuple-OID implementation"""
def OID( value ):
"""Null function to pretend to be oid.OID"""
return str(value)
<commit_msg>Declare use of string OIDs<commit_after> | """Stand-in module for those without the speed-enhanced tuple-OID implementation"""
USE_STRING_OIDS = True
def OID( value ):
"""Null function to pretend to be oid.OID"""
return str(value)
| """Stand-in module for those without the speed-enhanced tuple-OID implementation"""
def OID( value ):
"""Null function to pretend to be oid.OID"""
return str(value)
Declare use of string OIDs"""Stand-in module for those without the speed-enhanced tuple-OID implementation"""
USE_STRING_OIDS = True
def OID( value ):
"""Null function to pretend to be oid.OID"""
return str(value)
| <commit_before>"""Stand-in module for those without the speed-enhanced tuple-OID implementation"""
def OID( value ):
"""Null function to pretend to be oid.OID"""
return str(value)
<commit_msg>Declare use of string OIDs<commit_after>"""Stand-in module for those without the speed-enhanced tuple-OID implementation"""
USE_STRING_OIDS = True
def OID( value ):
"""Null function to pretend to be oid.OID"""
return str(value)
|
431fdabc5c103c9581758543359a54f650d24bcf | nodes/cpu_node.py | nodes/cpu_node.py | from node import Node
from model.cpu import CPU
from extraction.http.cpu_cw_http import CpuCwHTTP
class CPUNode(Node):
label = "CPU"
def __init__(self, service, timespan):
super(CPUNode, self).__init__(service, timespan)
def load_entities(self):
return CpuCwHTTP(self.service, self.timespan).load_entities()
def graph_nodes(self):
cpu_nodes = self.load_entities()
items = {}
for cpu in cpu_nodes:
if not items.has_key(cpu.label):
items[cpu.label] = []
items[cpu.label].append(cpu.__dict__)
return { 'expand_nodes' : [{"data" : {"id": 'code_fragment:validate', "name": 'func validate() (Go)'}}], 'data' : items }
def infer_context(self):
return [] | from node import Node
from model.cpu import CPU
from extraction.http.cpu_cw_http import CpuCwHTTP
class CPUNode(Node):
label = "CPU"
def __init__(self, service, timespan):
super(CPUNode, self).__init__(service, timespan)
def load_entities(self):
return CpuCwHTTP(self.service, self.timespan).load_entities()
def graph_nodes(self):
cpu_nodes = self.load_entities()
items = {}
for cpu in cpu_nodes:
if not items.has_key(cpu.label):
items[cpu.label] = []
items[cpu.label].append(cpu.__dict__)
return { 'expand_nodes' : [], 'data' : items }
def infer_context(self):
return []
| Remove CloudWave specific context expansion | Remove CloudWave specific context expansion
| Python | apache-2.0 | sealuzh/ContextBasedAnalytics,sealuzh/ContextBasedAnalytics,sealuzh/ContextBasedAnalytics | from node import Node
from model.cpu import CPU
from extraction.http.cpu_cw_http import CpuCwHTTP
class CPUNode(Node):
label = "CPU"
def __init__(self, service, timespan):
super(CPUNode, self).__init__(service, timespan)
def load_entities(self):
return CpuCwHTTP(self.service, self.timespan).load_entities()
def graph_nodes(self):
cpu_nodes = self.load_entities()
items = {}
for cpu in cpu_nodes:
if not items.has_key(cpu.label):
items[cpu.label] = []
items[cpu.label].append(cpu.__dict__)
return { 'expand_nodes' : [{"data" : {"id": 'code_fragment:validate', "name": 'func validate() (Go)'}}], 'data' : items }
def infer_context(self):
return []Remove CloudWave specific context expansion | from node import Node
from model.cpu import CPU
from extraction.http.cpu_cw_http import CpuCwHTTP
class CPUNode(Node):
label = "CPU"
def __init__(self, service, timespan):
super(CPUNode, self).__init__(service, timespan)
def load_entities(self):
return CpuCwHTTP(self.service, self.timespan).load_entities()
def graph_nodes(self):
cpu_nodes = self.load_entities()
items = {}
for cpu in cpu_nodes:
if not items.has_key(cpu.label):
items[cpu.label] = []
items[cpu.label].append(cpu.__dict__)
return { 'expand_nodes' : [], 'data' : items }
def infer_context(self):
return []
| <commit_before>from node import Node
from model.cpu import CPU
from extraction.http.cpu_cw_http import CpuCwHTTP
class CPUNode(Node):
label = "CPU"
def __init__(self, service, timespan):
super(CPUNode, self).__init__(service, timespan)
def load_entities(self):
return CpuCwHTTP(self.service, self.timespan).load_entities()
def graph_nodes(self):
cpu_nodes = self.load_entities()
items = {}
for cpu in cpu_nodes:
if not items.has_key(cpu.label):
items[cpu.label] = []
items[cpu.label].append(cpu.__dict__)
return { 'expand_nodes' : [{"data" : {"id": 'code_fragment:validate', "name": 'func validate() (Go)'}}], 'data' : items }
def infer_context(self):
return []<commit_msg>Remove CloudWave specific context expansion<commit_after> | from node import Node
from model.cpu import CPU
from extraction.http.cpu_cw_http import CpuCwHTTP
class CPUNode(Node):
label = "CPU"
def __init__(self, service, timespan):
super(CPUNode, self).__init__(service, timespan)
def load_entities(self):
return CpuCwHTTP(self.service, self.timespan).load_entities()
def graph_nodes(self):
cpu_nodes = self.load_entities()
items = {}
for cpu in cpu_nodes:
if not items.has_key(cpu.label):
items[cpu.label] = []
items[cpu.label].append(cpu.__dict__)
return { 'expand_nodes' : [], 'data' : items }
def infer_context(self):
return []
| from node import Node
from model.cpu import CPU
from extraction.http.cpu_cw_http import CpuCwHTTP
class CPUNode(Node):
label = "CPU"
def __init__(self, service, timespan):
super(CPUNode, self).__init__(service, timespan)
def load_entities(self):
return CpuCwHTTP(self.service, self.timespan).load_entities()
def graph_nodes(self):
cpu_nodes = self.load_entities()
items = {}
for cpu in cpu_nodes:
if not items.has_key(cpu.label):
items[cpu.label] = []
items[cpu.label].append(cpu.__dict__)
return { 'expand_nodes' : [{"data" : {"id": 'code_fragment:validate', "name": 'func validate() (Go)'}}], 'data' : items }
def infer_context(self):
return []Remove CloudWave specific context expansionfrom node import Node
from model.cpu import CPU
from extraction.http.cpu_cw_http import CpuCwHTTP
class CPUNode(Node):
label = "CPU"
def __init__(self, service, timespan):
super(CPUNode, self).__init__(service, timespan)
def load_entities(self):
return CpuCwHTTP(self.service, self.timespan).load_entities()
def graph_nodes(self):
cpu_nodes = self.load_entities()
items = {}
for cpu in cpu_nodes:
if not items.has_key(cpu.label):
items[cpu.label] = []
items[cpu.label].append(cpu.__dict__)
return { 'expand_nodes' : [], 'data' : items }
def infer_context(self):
return []
| <commit_before>from node import Node
from model.cpu import CPU
from extraction.http.cpu_cw_http import CpuCwHTTP
class CPUNode(Node):
label = "CPU"
def __init__(self, service, timespan):
super(CPUNode, self).__init__(service, timespan)
def load_entities(self):
return CpuCwHTTP(self.service, self.timespan).load_entities()
def graph_nodes(self):
cpu_nodes = self.load_entities()
items = {}
for cpu in cpu_nodes:
if not items.has_key(cpu.label):
items[cpu.label] = []
items[cpu.label].append(cpu.__dict__)
return { 'expand_nodes' : [{"data" : {"id": 'code_fragment:validate', "name": 'func validate() (Go)'}}], 'data' : items }
def infer_context(self):
return []<commit_msg>Remove CloudWave specific context expansion<commit_after>from node import Node
from model.cpu import CPU
from extraction.http.cpu_cw_http import CpuCwHTTP
class CPUNode(Node):
label = "CPU"
def __init__(self, service, timespan):
super(CPUNode, self).__init__(service, timespan)
def load_entities(self):
return CpuCwHTTP(self.service, self.timespan).load_entities()
def graph_nodes(self):
cpu_nodes = self.load_entities()
items = {}
for cpu in cpu_nodes:
if not items.has_key(cpu.label):
items[cpu.label] = []
items[cpu.label].append(cpu.__dict__)
return { 'expand_nodes' : [], 'data' : items }
def infer_context(self):
return []
|
8db347eaae51ea5f0a591bcecd5ba38263379aae | seqio/__init__.py | seqio/__init__.py | # Copyright 2021 The SeqIO Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Import to top-level API."""
# pylint:disable=wildcard-import,g-bad-import-order
from seqio.dataset_providers import *
from seqio import evaluation
from seqio import experimental
from seqio.evaluation import Evaluator
from seqio.evaluation import TensorAndNumpyEncoder
from seqio.feature_converters import *
from seqio import preprocessors
import seqio.test_utils
from seqio.utils import *
from seqio.vocabularies import *
# Version number.
from seqio.version import __version__
| # Copyright 2021 The SeqIO Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Import to top-level API."""
# pylint:disable=wildcard-import,g-bad-import-order
from seqio.dataset_providers import *
from seqio import evaluation
from seqio import experimental
from seqio.evaluation import Evaluator
from seqio.evaluation import JSONLogger
from seqio.evaluation import Logger
from seqio.evaluation import TensorAndNumpyEncoder
from seqio.evaluation import TensorBoardLogger
from seqio.feature_converters import *
from seqio import preprocessors
import seqio.test_utils
from seqio.utils import *
from seqio.vocabularies import *
# Version number.
from seqio.version import __version__
| Make loggers part of the top-level SeqIO API. | Make loggers part of the top-level SeqIO API.
PiperOrigin-RevId: 400711636
| Python | apache-2.0 | google/seqio | # Copyright 2021 The SeqIO Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Import to top-level API."""
# pylint:disable=wildcard-import,g-bad-import-order
from seqio.dataset_providers import *
from seqio import evaluation
from seqio import experimental
from seqio.evaluation import Evaluator
from seqio.evaluation import TensorAndNumpyEncoder
from seqio.feature_converters import *
from seqio import preprocessors
import seqio.test_utils
from seqio.utils import *
from seqio.vocabularies import *
# Version number.
from seqio.version import __version__
Make loggers part of the top-level SeqIO API.
PiperOrigin-RevId: 400711636 | # Copyright 2021 The SeqIO Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Import to top-level API."""
# pylint:disable=wildcard-import,g-bad-import-order
from seqio.dataset_providers import *
from seqio import evaluation
from seqio import experimental
from seqio.evaluation import Evaluator
from seqio.evaluation import JSONLogger
from seqio.evaluation import Logger
from seqio.evaluation import TensorAndNumpyEncoder
from seqio.evaluation import TensorBoardLogger
from seqio.feature_converters import *
from seqio import preprocessors
import seqio.test_utils
from seqio.utils import *
from seqio.vocabularies import *
# Version number.
from seqio.version import __version__
| <commit_before># Copyright 2021 The SeqIO Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Import to top-level API."""
# pylint:disable=wildcard-import,g-bad-import-order
from seqio.dataset_providers import *
from seqio import evaluation
from seqio import experimental
from seqio.evaluation import Evaluator
from seqio.evaluation import TensorAndNumpyEncoder
from seqio.feature_converters import *
from seqio import preprocessors
import seqio.test_utils
from seqio.utils import *
from seqio.vocabularies import *
# Version number.
from seqio.version import __version__
<commit_msg>Make loggers part of the top-level SeqIO API.
PiperOrigin-RevId: 400711636<commit_after> | # Copyright 2021 The SeqIO Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Import to top-level API."""
# pylint:disable=wildcard-import,g-bad-import-order
from seqio.dataset_providers import *
from seqio import evaluation
from seqio import experimental
from seqio.evaluation import Evaluator
from seqio.evaluation import JSONLogger
from seqio.evaluation import Logger
from seqio.evaluation import TensorAndNumpyEncoder
from seqio.evaluation import TensorBoardLogger
from seqio.feature_converters import *
from seqio import preprocessors
import seqio.test_utils
from seqio.utils import *
from seqio.vocabularies import *
# Version number.
from seqio.version import __version__
| # Copyright 2021 The SeqIO Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Import to top-level API."""
# pylint:disable=wildcard-import,g-bad-import-order
from seqio.dataset_providers import *
from seqio import evaluation
from seqio import experimental
from seqio.evaluation import Evaluator
from seqio.evaluation import TensorAndNumpyEncoder
from seqio.feature_converters import *
from seqio import preprocessors
import seqio.test_utils
from seqio.utils import *
from seqio.vocabularies import *
# Version number.
from seqio.version import __version__
Make loggers part of the top-level SeqIO API.
PiperOrigin-RevId: 400711636# Copyright 2021 The SeqIO Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Import to top-level API."""
# pylint:disable=wildcard-import,g-bad-import-order
from seqio.dataset_providers import *
from seqio import evaluation
from seqio import experimental
from seqio.evaluation import Evaluator
from seqio.evaluation import JSONLogger
from seqio.evaluation import Logger
from seqio.evaluation import TensorAndNumpyEncoder
from seqio.evaluation import TensorBoardLogger
from seqio.feature_converters import *
from seqio import preprocessors
import seqio.test_utils
from seqio.utils import *
from seqio.vocabularies import *
# Version number.
from seqio.version import __version__
| <commit_before># Copyright 2021 The SeqIO Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Import to top-level API."""
# pylint:disable=wildcard-import,g-bad-import-order
from seqio.dataset_providers import *
from seqio import evaluation
from seqio import experimental
from seqio.evaluation import Evaluator
from seqio.evaluation import TensorAndNumpyEncoder
from seqio.feature_converters import *
from seqio import preprocessors
import seqio.test_utils
from seqio.utils import *
from seqio.vocabularies import *
# Version number.
from seqio.version import __version__
<commit_msg>Make loggers part of the top-level SeqIO API.
PiperOrigin-RevId: 400711636<commit_after># Copyright 2021 The SeqIO Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Import to top-level API."""
# pylint:disable=wildcard-import,g-bad-import-order
from seqio.dataset_providers import *
from seqio import evaluation
from seqio import experimental
from seqio.evaluation import Evaluator
from seqio.evaluation import JSONLogger
from seqio.evaluation import Logger
from seqio.evaluation import TensorAndNumpyEncoder
from seqio.evaluation import TensorBoardLogger
from seqio.feature_converters import *
from seqio import preprocessors
import seqio.test_utils
from seqio.utils import *
from seqio.vocabularies import *
# Version number.
from seqio.version import __version__
|
484eaaf6349a631f483af12acd358bce5ca567d5 | zeeko/messages/setup_package.py | zeeko/messages/setup_package.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import glob
import os
import copy
import zmq
from distutils.core import Extension
def get_extensions(**kwargs):
"""Get the Cython extensions"""
this_directory = os.path.dirname(__file__)
this_name = __name__.split(".")[:-1]
extension_args = {
'include_dirs' : ['numpy'] + zmq.get_includes(),
'libraries' : [],
'sources' : []
}
extension_args.update(kwargs)
extensions = []
for component in glob.iglob(os.path.join(this_directory, "*.pyx")):
# Component name and full module name.
this_extension_args = copy.deepcopy(extension_args)
cname = os.path.splitext(os.path.basename(component))[0]
if cname.startswith("_"):
cname = cname[1:]
name = ".".join(this_name + ["_{0:s}".format(cname)])
else:
name = ".".join(this_name + [cname])
this_extension_args['sources'].append(component)
# Extension object.
extension = Extension(name, **this_extension_args)
extensions.append(extension)
return extensions | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import glob
import os
import copy
from distutils.core import Extension
def get_extensions(**kwargs):
"""Get the Cython extensions"""
import zmq
this_directory = os.path.dirname(__file__)
this_name = __name__.split(".")[:-1]
extension_args = {
'include_dirs' : ['numpy'] + zmq.get_includes(),
'libraries' : [],
'sources' : []
}
extension_args.update(kwargs)
extensions = []
for component in glob.iglob(os.path.join(this_directory, "*.pyx")):
# Component name and full module name.
this_extension_args = copy.deepcopy(extension_args)
cname = os.path.splitext(os.path.basename(component))[0]
if cname.startswith("_"):
cname = cname[1:]
name = ".".join(this_name + ["_{0:s}".format(cname)])
else:
name = ".".join(this_name + [cname])
this_extension_args['sources'].append(component)
# Extension object.
extension = Extension(name, **this_extension_args)
extensions.append(extension)
return extensions | Fix stray zmq import in egg_info | Fix stray zmq import in egg_info
| Python | bsd-3-clause | alexrudy/Zeeko,alexrudy/Zeeko | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import glob
import os
import copy
import zmq
from distutils.core import Extension
def get_extensions(**kwargs):
"""Get the Cython extensions"""
this_directory = os.path.dirname(__file__)
this_name = __name__.split(".")[:-1]
extension_args = {
'include_dirs' : ['numpy'] + zmq.get_includes(),
'libraries' : [],
'sources' : []
}
extension_args.update(kwargs)
extensions = []
for component in glob.iglob(os.path.join(this_directory, "*.pyx")):
# Component name and full module name.
this_extension_args = copy.deepcopy(extension_args)
cname = os.path.splitext(os.path.basename(component))[0]
if cname.startswith("_"):
cname = cname[1:]
name = ".".join(this_name + ["_{0:s}".format(cname)])
else:
name = ".".join(this_name + [cname])
this_extension_args['sources'].append(component)
# Extension object.
extension = Extension(name, **this_extension_args)
extensions.append(extension)
return extensionsFix stray zmq import in egg_info | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import glob
import os
import copy
from distutils.core import Extension
def get_extensions(**kwargs):
"""Get the Cython extensions"""
import zmq
this_directory = os.path.dirname(__file__)
this_name = __name__.split(".")[:-1]
extension_args = {
'include_dirs' : ['numpy'] + zmq.get_includes(),
'libraries' : [],
'sources' : []
}
extension_args.update(kwargs)
extensions = []
for component in glob.iglob(os.path.join(this_directory, "*.pyx")):
# Component name and full module name.
this_extension_args = copy.deepcopy(extension_args)
cname = os.path.splitext(os.path.basename(component))[0]
if cname.startswith("_"):
cname = cname[1:]
name = ".".join(this_name + ["_{0:s}".format(cname)])
else:
name = ".".join(this_name + [cname])
this_extension_args['sources'].append(component)
# Extension object.
extension = Extension(name, **this_extension_args)
extensions.append(extension)
return extensions | <commit_before># -*- coding: utf-8 -*-
from __future__ import absolute_import
import glob
import os
import copy
import zmq
from distutils.core import Extension
def get_extensions(**kwargs):
"""Get the Cython extensions"""
this_directory = os.path.dirname(__file__)
this_name = __name__.split(".")[:-1]
extension_args = {
'include_dirs' : ['numpy'] + zmq.get_includes(),
'libraries' : [],
'sources' : []
}
extension_args.update(kwargs)
extensions = []
for component in glob.iglob(os.path.join(this_directory, "*.pyx")):
# Component name and full module name.
this_extension_args = copy.deepcopy(extension_args)
cname = os.path.splitext(os.path.basename(component))[0]
if cname.startswith("_"):
cname = cname[1:]
name = ".".join(this_name + ["_{0:s}".format(cname)])
else:
name = ".".join(this_name + [cname])
this_extension_args['sources'].append(component)
# Extension object.
extension = Extension(name, **this_extension_args)
extensions.append(extension)
return extensions<commit_msg>Fix stray zmq import in egg_info<commit_after> | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import glob
import os
import copy
from distutils.core import Extension
def get_extensions(**kwargs):
"""Get the Cython extensions"""
import zmq
this_directory = os.path.dirname(__file__)
this_name = __name__.split(".")[:-1]
extension_args = {
'include_dirs' : ['numpy'] + zmq.get_includes(),
'libraries' : [],
'sources' : []
}
extension_args.update(kwargs)
extensions = []
for component in glob.iglob(os.path.join(this_directory, "*.pyx")):
# Component name and full module name.
this_extension_args = copy.deepcopy(extension_args)
cname = os.path.splitext(os.path.basename(component))[0]
if cname.startswith("_"):
cname = cname[1:]
name = ".".join(this_name + ["_{0:s}".format(cname)])
else:
name = ".".join(this_name + [cname])
this_extension_args['sources'].append(component)
# Extension object.
extension = Extension(name, **this_extension_args)
extensions.append(extension)
return extensions | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import glob
import os
import copy
import zmq
from distutils.core import Extension
def get_extensions(**kwargs):
"""Get the Cython extensions"""
this_directory = os.path.dirname(__file__)
this_name = __name__.split(".")[:-1]
extension_args = {
'include_dirs' : ['numpy'] + zmq.get_includes(),
'libraries' : [],
'sources' : []
}
extension_args.update(kwargs)
extensions = []
for component in glob.iglob(os.path.join(this_directory, "*.pyx")):
# Component name and full module name.
this_extension_args = copy.deepcopy(extension_args)
cname = os.path.splitext(os.path.basename(component))[0]
if cname.startswith("_"):
cname = cname[1:]
name = ".".join(this_name + ["_{0:s}".format(cname)])
else:
name = ".".join(this_name + [cname])
this_extension_args['sources'].append(component)
# Extension object.
extension = Extension(name, **this_extension_args)
extensions.append(extension)
return extensionsFix stray zmq import in egg_info# -*- coding: utf-8 -*-
from __future__ import absolute_import
import glob
import os
import copy
from distutils.core import Extension
def get_extensions(**kwargs):
"""Get the Cython extensions"""
import zmq
this_directory = os.path.dirname(__file__)
this_name = __name__.split(".")[:-1]
extension_args = {
'include_dirs' : ['numpy'] + zmq.get_includes(),
'libraries' : [],
'sources' : []
}
extension_args.update(kwargs)
extensions = []
for component in glob.iglob(os.path.join(this_directory, "*.pyx")):
# Component name and full module name.
this_extension_args = copy.deepcopy(extension_args)
cname = os.path.splitext(os.path.basename(component))[0]
if cname.startswith("_"):
cname = cname[1:]
name = ".".join(this_name + ["_{0:s}".format(cname)])
else:
name = ".".join(this_name + [cname])
this_extension_args['sources'].append(component)
# Extension object.
extension = Extension(name, **this_extension_args)
extensions.append(extension)
return extensions | <commit_before># -*- coding: utf-8 -*-
from __future__ import absolute_import
import glob
import os
import copy
import zmq
from distutils.core import Extension
def get_extensions(**kwargs):
"""Get the Cython extensions"""
this_directory = os.path.dirname(__file__)
this_name = __name__.split(".")[:-1]
extension_args = {
'include_dirs' : ['numpy'] + zmq.get_includes(),
'libraries' : [],
'sources' : []
}
extension_args.update(kwargs)
extensions = []
for component in glob.iglob(os.path.join(this_directory, "*.pyx")):
# Component name and full module name.
this_extension_args = copy.deepcopy(extension_args)
cname = os.path.splitext(os.path.basename(component))[0]
if cname.startswith("_"):
cname = cname[1:]
name = ".".join(this_name + ["_{0:s}".format(cname)])
else:
name = ".".join(this_name + [cname])
this_extension_args['sources'].append(component)
# Extension object.
extension = Extension(name, **this_extension_args)
extensions.append(extension)
return extensions<commit_msg>Fix stray zmq import in egg_info<commit_after># -*- coding: utf-8 -*-
from __future__ import absolute_import
import glob
import os
import copy
from distutils.core import Extension
def get_extensions(**kwargs):
"""Get the Cython extensions"""
import zmq
this_directory = os.path.dirname(__file__)
this_name = __name__.split(".")[:-1]
extension_args = {
'include_dirs' : ['numpy'] + zmq.get_includes(),
'libraries' : [],
'sources' : []
}
extension_args.update(kwargs)
extensions = []
for component in glob.iglob(os.path.join(this_directory, "*.pyx")):
# Component name and full module name.
this_extension_args = copy.deepcopy(extension_args)
cname = os.path.splitext(os.path.basename(component))[0]
if cname.startswith("_"):
cname = cname[1:]
name = ".".join(this_name + ["_{0:s}".format(cname)])
else:
name = ".".join(this_name + [cname])
this_extension_args['sources'].append(component)
# Extension object.
extension = Extension(name, **this_extension_args)
extensions.append(extension)
return extensions |
5c447d46a8a62407549650ada98131968ace9921 | spyc/scheduler.py | spyc/scheduler.py | from spyc.graph import Vertex, find_cycle, topological_sort
class Scheduler(object):
def __init__(self):
self.specs = {}
def ensure(self, spec):
"""Require that ``spec`` is satisfied."""
if spec.key() in self.specs:
self.specs[spec.key()].data.merge(spec)
else:
self.specs[spec.key()] = Vertex(spec)
def depend(self, first, next):
"""Specify that ``first`` depends on ``next``.
This also has the effect of invoking ``ensure`` on both resources.
"""
first.schedule(self)
next.schedule(self)
self.specs[first.key()].edges.add(self.specs[next.key()])
def apply(self):
verticies = set(self.specs.values())
cycle = find_cycle(verticies)
if cycle is not None:
assert False # TODO proper checking
for v in topological_sort(verticies):
v.data.apply()
| from spyc.graph import Vertex, find_cycle, topological_sort
class CircularDependency(Exception):
pass
class Scheduler(object):
def __init__(self):
self.specs = {}
def ensure(self, spec):
"""Require that ``spec`` is satisfied."""
if spec.key() in self.specs:
self.specs[spec.key()].data.merge(spec)
else:
self.specs[spec.key()] = Vertex(spec)
def depend(self, first, next):
"""Specify that ``first`` depends on ``next``.
This also has the effect of invoking ``ensure`` on both resources.
"""
first.schedule(self)
next.schedule(self)
self.specs[first.key()].edges.add(self.specs[next.key()])
def apply(self):
verticies = set(self.specs.values())
cycle = find_cycle(verticies)
if cycle is not None:
raise CircularDependency(cycle)
for v in topological_sort(verticies):
v.data.apply()
| Raise a more useful error for circular deps. | Raise a more useful error for circular deps.
| Python | lgpl-2.1 | zenhack/spyc | from spyc.graph import Vertex, find_cycle, topological_sort
class Scheduler(object):
def __init__(self):
self.specs = {}
def ensure(self, spec):
"""Require that ``spec`` is satisfied."""
if spec.key() in self.specs:
self.specs[spec.key()].data.merge(spec)
else:
self.specs[spec.key()] = Vertex(spec)
def depend(self, first, next):
"""Specify that ``first`` depends on ``next``.
This also has the effect of invoking ``ensure`` on both resources.
"""
first.schedule(self)
next.schedule(self)
self.specs[first.key()].edges.add(self.specs[next.key()])
def apply(self):
verticies = set(self.specs.values())
cycle = find_cycle(verticies)
if cycle is not None:
assert False # TODO proper checking
for v in topological_sort(verticies):
v.data.apply()
Raise a more useful error for circular deps. | from spyc.graph import Vertex, find_cycle, topological_sort
class CircularDependency(Exception):
pass
class Scheduler(object):
def __init__(self):
self.specs = {}
def ensure(self, spec):
"""Require that ``spec`` is satisfied."""
if spec.key() in self.specs:
self.specs[spec.key()].data.merge(spec)
else:
self.specs[spec.key()] = Vertex(spec)
def depend(self, first, next):
"""Specify that ``first`` depends on ``next``.
This also has the effect of invoking ``ensure`` on both resources.
"""
first.schedule(self)
next.schedule(self)
self.specs[first.key()].edges.add(self.specs[next.key()])
def apply(self):
verticies = set(self.specs.values())
cycle = find_cycle(verticies)
if cycle is not None:
raise CircularDependency(cycle)
for v in topological_sort(verticies):
v.data.apply()
| <commit_before>from spyc.graph import Vertex, find_cycle, topological_sort
class Scheduler(object):
def __init__(self):
self.specs = {}
def ensure(self, spec):
"""Require that ``spec`` is satisfied."""
if spec.key() in self.specs:
self.specs[spec.key()].data.merge(spec)
else:
self.specs[spec.key()] = Vertex(spec)
def depend(self, first, next):
"""Specify that ``first`` depends on ``next``.
This also has the effect of invoking ``ensure`` on both resources.
"""
first.schedule(self)
next.schedule(self)
self.specs[first.key()].edges.add(self.specs[next.key()])
def apply(self):
verticies = set(self.specs.values())
cycle = find_cycle(verticies)
if cycle is not None:
assert False # TODO proper checking
for v in topological_sort(verticies):
v.data.apply()
<commit_msg>Raise a more useful error for circular deps.<commit_after> | from spyc.graph import Vertex, find_cycle, topological_sort
class CircularDependency(Exception):
pass
class Scheduler(object):
def __init__(self):
self.specs = {}
def ensure(self, spec):
"""Require that ``spec`` is satisfied."""
if spec.key() in self.specs:
self.specs[spec.key()].data.merge(spec)
else:
self.specs[spec.key()] = Vertex(spec)
def depend(self, first, next):
"""Specify that ``first`` depends on ``next``.
This also has the effect of invoking ``ensure`` on both resources.
"""
first.schedule(self)
next.schedule(self)
self.specs[first.key()].edges.add(self.specs[next.key()])
def apply(self):
verticies = set(self.specs.values())
cycle = find_cycle(verticies)
if cycle is not None:
raise CircularDependency(cycle)
for v in topological_sort(verticies):
v.data.apply()
| from spyc.graph import Vertex, find_cycle, topological_sort
class Scheduler(object):
def __init__(self):
self.specs = {}
def ensure(self, spec):
"""Require that ``spec`` is satisfied."""
if spec.key() in self.specs:
self.specs[spec.key()].data.merge(spec)
else:
self.specs[spec.key()] = Vertex(spec)
def depend(self, first, next):
"""Specify that ``first`` depends on ``next``.
This also has the effect of invoking ``ensure`` on both resources.
"""
first.schedule(self)
next.schedule(self)
self.specs[first.key()].edges.add(self.specs[next.key()])
def apply(self):
verticies = set(self.specs.values())
cycle = find_cycle(verticies)
if cycle is not None:
assert False # TODO proper checking
for v in topological_sort(verticies):
v.data.apply()
Raise a more useful error for circular deps.from spyc.graph import Vertex, find_cycle, topological_sort
class CircularDependency(Exception):
pass
class Scheduler(object):
def __init__(self):
self.specs = {}
def ensure(self, spec):
"""Require that ``spec`` is satisfied."""
if spec.key() in self.specs:
self.specs[spec.key()].data.merge(spec)
else:
self.specs[spec.key()] = Vertex(spec)
def depend(self, first, next):
"""Specify that ``first`` depends on ``next``.
This also has the effect of invoking ``ensure`` on both resources.
"""
first.schedule(self)
next.schedule(self)
self.specs[first.key()].edges.add(self.specs[next.key()])
def apply(self):
verticies = set(self.specs.values())
cycle = find_cycle(verticies)
if cycle is not None:
raise CircularDependency(cycle)
for v in topological_sort(verticies):
v.data.apply()
| <commit_before>from spyc.graph import Vertex, find_cycle, topological_sort
class Scheduler(object):
def __init__(self):
self.specs = {}
def ensure(self, spec):
"""Require that ``spec`` is satisfied."""
if spec.key() in self.specs:
self.specs[spec.key()].data.merge(spec)
else:
self.specs[spec.key()] = Vertex(spec)
def depend(self, first, next):
"""Specify that ``first`` depends on ``next``.
This also has the effect of invoking ``ensure`` on both resources.
"""
first.schedule(self)
next.schedule(self)
self.specs[first.key()].edges.add(self.specs[next.key()])
def apply(self):
verticies = set(self.specs.values())
cycle = find_cycle(verticies)
if cycle is not None:
assert False # TODO proper checking
for v in topological_sort(verticies):
v.data.apply()
<commit_msg>Raise a more useful error for circular deps.<commit_after>from spyc.graph import Vertex, find_cycle, topological_sort
class CircularDependency(Exception):
pass
class Scheduler(object):
def __init__(self):
self.specs = {}
def ensure(self, spec):
"""Require that ``spec`` is satisfied."""
if spec.key() in self.specs:
self.specs[spec.key()].data.merge(spec)
else:
self.specs[spec.key()] = Vertex(spec)
def depend(self, first, next):
"""Specify that ``first`` depends on ``next``.
This also has the effect of invoking ``ensure`` on both resources.
"""
first.schedule(self)
next.schedule(self)
self.specs[first.key()].edges.add(self.specs[next.key()])
def apply(self):
verticies = set(self.specs.values())
cycle = find_cycle(verticies)
if cycle is not None:
raise CircularDependency(cycle)
for v in topological_sort(verticies):
v.data.apply()
|
01e629b43be83cd5ba37f7a3ecbf60c73d8ed2e6 | calexicon/internal/tests/test_julian.py | calexicon/internal/tests/test_julian.py | import unittest
from datetime import date as vanilla_date
from calexicon.internal.julian import distant_julian_to_gregorian, julian_to_gregorian, is_julian_leap_year
class TestJulian(unittest.TestCase):
def test_is_gregorian_leap_year(self):
self.assertTrue(is_julian_leap_year(2000))
self.assertTrue(is_julian_leap_year(1984))
self.assertTrue(is_julian_leap_year(1900))
self.assertFalse(is_julian_leap_year(1901))
def test_distant_julian_to_gregorian(self):
self.assertEqual(distant_julian_to_gregorian(9999, 12, 1), (10000, 2, 12))
def test_julian_to_gregorian(self):
self.assertEqual(julian_to_gregorian(1984, 2, 29), vanilla_date(1984, 3, 13))
| import unittest
from datetime import date as vanilla_date
from calexicon.internal.julian import distant_julian_to_gregorian, julian_to_gregorian
from calexicon.internal.julian import is_julian_leap_year
class TestJulian(unittest.TestCase):
def test_is_gregorian_leap_year(self):
self.assertTrue(is_julian_leap_year(2000))
self.assertTrue(is_julian_leap_year(1984))
self.assertTrue(is_julian_leap_year(1900))
self.assertFalse(is_julian_leap_year(1901))
def test_distant_julian_to_gregorian(self):
self.assertEqual(distant_julian_to_gregorian(9999, 12, 1), (10000, 2, 12))
def test_julian_to_gregorian(self):
self.assertEqual(julian_to_gregorian(1984, 2, 29), vanilla_date(1984, 3, 13))
| Split long import line up into two. | Split long import line up into two.
| Python | apache-2.0 | jwg4/calexicon,jwg4/qual | import unittest
from datetime import date as vanilla_date
from calexicon.internal.julian import distant_julian_to_gregorian, julian_to_gregorian, is_julian_leap_year
class TestJulian(unittest.TestCase):
def test_is_gregorian_leap_year(self):
self.assertTrue(is_julian_leap_year(2000))
self.assertTrue(is_julian_leap_year(1984))
self.assertTrue(is_julian_leap_year(1900))
self.assertFalse(is_julian_leap_year(1901))
def test_distant_julian_to_gregorian(self):
self.assertEqual(distant_julian_to_gregorian(9999, 12, 1), (10000, 2, 12))
def test_julian_to_gregorian(self):
self.assertEqual(julian_to_gregorian(1984, 2, 29), vanilla_date(1984, 3, 13))
Split long import line up into two. | import unittest
from datetime import date as vanilla_date
from calexicon.internal.julian import distant_julian_to_gregorian, julian_to_gregorian
from calexicon.internal.julian import is_julian_leap_year
class TestJulian(unittest.TestCase):
def test_is_gregorian_leap_year(self):
self.assertTrue(is_julian_leap_year(2000))
self.assertTrue(is_julian_leap_year(1984))
self.assertTrue(is_julian_leap_year(1900))
self.assertFalse(is_julian_leap_year(1901))
def test_distant_julian_to_gregorian(self):
self.assertEqual(distant_julian_to_gregorian(9999, 12, 1), (10000, 2, 12))
def test_julian_to_gregorian(self):
self.assertEqual(julian_to_gregorian(1984, 2, 29), vanilla_date(1984, 3, 13))
| <commit_before>import unittest
from datetime import date as vanilla_date
from calexicon.internal.julian import distant_julian_to_gregorian, julian_to_gregorian, is_julian_leap_year
class TestJulian(unittest.TestCase):
def test_is_gregorian_leap_year(self):
self.assertTrue(is_julian_leap_year(2000))
self.assertTrue(is_julian_leap_year(1984))
self.assertTrue(is_julian_leap_year(1900))
self.assertFalse(is_julian_leap_year(1901))
def test_distant_julian_to_gregorian(self):
self.assertEqual(distant_julian_to_gregorian(9999, 12, 1), (10000, 2, 12))
def test_julian_to_gregorian(self):
self.assertEqual(julian_to_gregorian(1984, 2, 29), vanilla_date(1984, 3, 13))
<commit_msg>Split long import line up into two.<commit_after> | import unittest
from datetime import date as vanilla_date
from calexicon.internal.julian import distant_julian_to_gregorian, julian_to_gregorian
from calexicon.internal.julian import is_julian_leap_year
class TestJulian(unittest.TestCase):
def test_is_gregorian_leap_year(self):
self.assertTrue(is_julian_leap_year(2000))
self.assertTrue(is_julian_leap_year(1984))
self.assertTrue(is_julian_leap_year(1900))
self.assertFalse(is_julian_leap_year(1901))
def test_distant_julian_to_gregorian(self):
self.assertEqual(distant_julian_to_gregorian(9999, 12, 1), (10000, 2, 12))
def test_julian_to_gregorian(self):
self.assertEqual(julian_to_gregorian(1984, 2, 29), vanilla_date(1984, 3, 13))
| import unittest
from datetime import date as vanilla_date
from calexicon.internal.julian import distant_julian_to_gregorian, julian_to_gregorian, is_julian_leap_year
class TestJulian(unittest.TestCase):
def test_is_gregorian_leap_year(self):
self.assertTrue(is_julian_leap_year(2000))
self.assertTrue(is_julian_leap_year(1984))
self.assertTrue(is_julian_leap_year(1900))
self.assertFalse(is_julian_leap_year(1901))
def test_distant_julian_to_gregorian(self):
self.assertEqual(distant_julian_to_gregorian(9999, 12, 1), (10000, 2, 12))
def test_julian_to_gregorian(self):
self.assertEqual(julian_to_gregorian(1984, 2, 29), vanilla_date(1984, 3, 13))
Split long import line up into two.import unittest
from datetime import date as vanilla_date
from calexicon.internal.julian import distant_julian_to_gregorian, julian_to_gregorian
from calexicon.internal.julian import is_julian_leap_year
class TestJulian(unittest.TestCase):
def test_is_gregorian_leap_year(self):
self.assertTrue(is_julian_leap_year(2000))
self.assertTrue(is_julian_leap_year(1984))
self.assertTrue(is_julian_leap_year(1900))
self.assertFalse(is_julian_leap_year(1901))
def test_distant_julian_to_gregorian(self):
self.assertEqual(distant_julian_to_gregorian(9999, 12, 1), (10000, 2, 12))
def test_julian_to_gregorian(self):
self.assertEqual(julian_to_gregorian(1984, 2, 29), vanilla_date(1984, 3, 13))
| <commit_before>import unittest
from datetime import date as vanilla_date
from calexicon.internal.julian import distant_julian_to_gregorian, julian_to_gregorian, is_julian_leap_year
class TestJulian(unittest.TestCase):
def test_is_gregorian_leap_year(self):
self.assertTrue(is_julian_leap_year(2000))
self.assertTrue(is_julian_leap_year(1984))
self.assertTrue(is_julian_leap_year(1900))
self.assertFalse(is_julian_leap_year(1901))
def test_distant_julian_to_gregorian(self):
self.assertEqual(distant_julian_to_gregorian(9999, 12, 1), (10000, 2, 12))
def test_julian_to_gregorian(self):
self.assertEqual(julian_to_gregorian(1984, 2, 29), vanilla_date(1984, 3, 13))
<commit_msg>Split long import line up into two.<commit_after>import unittest
from datetime import date as vanilla_date
from calexicon.internal.julian import distant_julian_to_gregorian, julian_to_gregorian
from calexicon.internal.julian import is_julian_leap_year
class TestJulian(unittest.TestCase):
def test_is_gregorian_leap_year(self):
self.assertTrue(is_julian_leap_year(2000))
self.assertTrue(is_julian_leap_year(1984))
self.assertTrue(is_julian_leap_year(1900))
self.assertFalse(is_julian_leap_year(1901))
def test_distant_julian_to_gregorian(self):
self.assertEqual(distant_julian_to_gregorian(9999, 12, 1), (10000, 2, 12))
def test_julian_to_gregorian(self):
self.assertEqual(julian_to_gregorian(1984, 2, 29), vanilla_date(1984, 3, 13))
|
c0a74c86e772185d35f0e6049e0ce04fcdb30793 | chatterbot/adapters/io/multi_adapter.py | chatterbot/adapters/io/multi_adapter.py | from .io import IOAdapter
class MultiIOAdapter(IOAdapter):
def __init__(self, **kwargs):
super(MultiIOAdapter, self).__init__(**kwargs)
self.adapters = []
def process_input(self, *args, **kwargs):
"""
Returns data retrieved from the input source.
"""
if self.adapters is not []:
return self.adapters[0].process_input(*args, **kwargs)
def process_response(self, statement):
"""
Takes an input value.
Returns an output value.
"""
for adapter in self.adapters:
adapter.process_response(statement)
return self.adapters[0].process_response(statement)
def add_adapter(self, adapter):
self.adapters.append(adapter)
def set_context(self, context):
"""
Set the context for each of the contained io adapters.
"""
super(MultiIOAdapter, self).set_context(context)
for adapter in self.adapters:
adapter.set_context(context)
| from .io import IOAdapter
class MultiIOAdapter(IOAdapter):
def __init__(self, **kwargs):
super(MultiIOAdapter, self).__init__(**kwargs)
self.adapters = []
def process_input(self, *args, **kwargs):
"""
Returns data retrieved from the input source.
"""
if self.adapters is not []:
return self.adapters[0].process_input(*args, **kwargs)
def process_response(self, statement):
"""
Takes an input value.
Returns an output value.
"""
for i in range(1, len(self.adapters)):
self.adapters[i].process_response(statement)
return self.adapters[0].process_response(statement)
def add_adapter(self, adapter):
self.adapters.append(adapter)
def set_context(self, context):
"""
Set the context for each of the contained io adapters.
"""
super(MultiIOAdapter, self).set_context(context)
for adapter in self.adapters:
adapter.set_context(context)
| Fix first io adapter being called twice. | Fix first io adapter being called twice.
| Python | bsd-3-clause | Reinaesaya/OUIRL-ChatBot,maclogan/VirtualPenPal,Reinaesaya/OUIRL-ChatBot,Gustavo6046/ChatterBot,davizucon/ChatterBot,gunthercox/ChatterBot,vkosuri/ChatterBot | from .io import IOAdapter
class MultiIOAdapter(IOAdapter):
def __init__(self, **kwargs):
super(MultiIOAdapter, self).__init__(**kwargs)
self.adapters = []
def process_input(self, *args, **kwargs):
"""
Returns data retrieved from the input source.
"""
if self.adapters is not []:
return self.adapters[0].process_input(*args, **kwargs)
def process_response(self, statement):
"""
Takes an input value.
Returns an output value.
"""
for adapter in self.adapters:
adapter.process_response(statement)
return self.adapters[0].process_response(statement)
def add_adapter(self, adapter):
self.adapters.append(adapter)
def set_context(self, context):
"""
Set the context for each of the contained io adapters.
"""
super(MultiIOAdapter, self).set_context(context)
for adapter in self.adapters:
adapter.set_context(context)
Fix first io adapter being called twice. | from .io import IOAdapter
class MultiIOAdapter(IOAdapter):
def __init__(self, **kwargs):
super(MultiIOAdapter, self).__init__(**kwargs)
self.adapters = []
def process_input(self, *args, **kwargs):
"""
Returns data retrieved from the input source.
"""
if self.adapters is not []:
return self.adapters[0].process_input(*args, **kwargs)
def process_response(self, statement):
"""
Takes an input value.
Returns an output value.
"""
for i in range(1, len(self.adapters)):
self.adapters[i].process_response(statement)
return self.adapters[0].process_response(statement)
def add_adapter(self, adapter):
self.adapters.append(adapter)
def set_context(self, context):
"""
Set the context for each of the contained io adapters.
"""
super(MultiIOAdapter, self).set_context(context)
for adapter in self.adapters:
adapter.set_context(context)
| <commit_before>from .io import IOAdapter
class MultiIOAdapter(IOAdapter):
def __init__(self, **kwargs):
super(MultiIOAdapter, self).__init__(**kwargs)
self.adapters = []
def process_input(self, *args, **kwargs):
"""
Returns data retrieved from the input source.
"""
if self.adapters is not []:
return self.adapters[0].process_input(*args, **kwargs)
def process_response(self, statement):
"""
Takes an input value.
Returns an output value.
"""
for adapter in self.adapters:
adapter.process_response(statement)
return self.adapters[0].process_response(statement)
def add_adapter(self, adapter):
self.adapters.append(adapter)
def set_context(self, context):
"""
Set the context for each of the contained io adapters.
"""
super(MultiIOAdapter, self).set_context(context)
for adapter in self.adapters:
adapter.set_context(context)
<commit_msg>Fix first io adapter being called twice.<commit_after> | from .io import IOAdapter
class MultiIOAdapter(IOAdapter):
def __init__(self, **kwargs):
super(MultiIOAdapter, self).__init__(**kwargs)
self.adapters = []
def process_input(self, *args, **kwargs):
"""
Returns data retrieved from the input source.
"""
if self.adapters is not []:
return self.adapters[0].process_input(*args, **kwargs)
def process_response(self, statement):
"""
Takes an input value.
Returns an output value.
"""
for i in range(1, len(self.adapters)):
self.adapters[i].process_response(statement)
return self.adapters[0].process_response(statement)
def add_adapter(self, adapter):
self.adapters.append(adapter)
def set_context(self, context):
"""
Set the context for each of the contained io adapters.
"""
super(MultiIOAdapter, self).set_context(context)
for adapter in self.adapters:
adapter.set_context(context)
| from .io import IOAdapter
class MultiIOAdapter(IOAdapter):
def __init__(self, **kwargs):
super(MultiIOAdapter, self).__init__(**kwargs)
self.adapters = []
def process_input(self, *args, **kwargs):
"""
Returns data retrieved from the input source.
"""
if self.adapters is not []:
return self.adapters[0].process_input(*args, **kwargs)
def process_response(self, statement):
"""
Takes an input value.
Returns an output value.
"""
for adapter in self.adapters:
adapter.process_response(statement)
return self.adapters[0].process_response(statement)
def add_adapter(self, adapter):
self.adapters.append(adapter)
def set_context(self, context):
"""
Set the context for each of the contained io adapters.
"""
super(MultiIOAdapter, self).set_context(context)
for adapter in self.adapters:
adapter.set_context(context)
Fix first io adapter being called twice.from .io import IOAdapter
class MultiIOAdapter(IOAdapter):
def __init__(self, **kwargs):
super(MultiIOAdapter, self).__init__(**kwargs)
self.adapters = []
def process_input(self, *args, **kwargs):
"""
Returns data retrieved from the input source.
"""
if self.adapters is not []:
return self.adapters[0].process_input(*args, **kwargs)
def process_response(self, statement):
"""
Takes an input value.
Returns an output value.
"""
for i in range(1, len(self.adapters)):
self.adapters[i].process_response(statement)
return self.adapters[0].process_response(statement)
def add_adapter(self, adapter):
self.adapters.append(adapter)
def set_context(self, context):
"""
Set the context for each of the contained io adapters.
"""
super(MultiIOAdapter, self).set_context(context)
for adapter in self.adapters:
adapter.set_context(context)
| <commit_before>from .io import IOAdapter
class MultiIOAdapter(IOAdapter):
def __init__(self, **kwargs):
super(MultiIOAdapter, self).__init__(**kwargs)
self.adapters = []
def process_input(self, *args, **kwargs):
"""
Returns data retrieved from the input source.
"""
if self.adapters is not []:
return self.adapters[0].process_input(*args, **kwargs)
def process_response(self, statement):
"""
Takes an input value.
Returns an output value.
"""
for adapter in self.adapters:
adapter.process_response(statement)
return self.adapters[0].process_response(statement)
def add_adapter(self, adapter):
self.adapters.append(adapter)
def set_context(self, context):
"""
Set the context for each of the contained io adapters.
"""
super(MultiIOAdapter, self).set_context(context)
for adapter in self.adapters:
adapter.set_context(context)
<commit_msg>Fix first io adapter being called twice.<commit_after>from .io import IOAdapter
class MultiIOAdapter(IOAdapter):
def __init__(self, **kwargs):
super(MultiIOAdapter, self).__init__(**kwargs)
self.adapters = []
def process_input(self, *args, **kwargs):
"""
Returns data retrieved from the input source.
"""
if self.adapters is not []:
return self.adapters[0].process_input(*args, **kwargs)
def process_response(self, statement):
"""
Takes an input value.
Returns an output value.
"""
for i in range(1, len(self.adapters)):
self.adapters[i].process_response(statement)
return self.adapters[0].process_response(statement)
def add_adapter(self, adapter):
self.adapters.append(adapter)
def set_context(self, context):
"""
Set the context for each of the contained io adapters.
"""
super(MultiIOAdapter, self).set_context(context)
for adapter in self.adapters:
adapter.set_context(context)
|
5ae17b58d4823cb5e74ec1b7802c0debbc119dc9 | setuptools/command/bdist_wininst.py | setuptools/command/bdist_wininst.py | from distutils.command.bdist_wininst import bdist_wininst as _bdist_wininst
class bdist_wininst(_bdist_wininst):
_good_upload = _bad_upload = None
def create_exe(self, arcname, fullname, bitmap=None):
_bdist_wininst.create_exe(self, arcname, fullname, bitmap)
installer_name = self.get_installer_filename(fullname)
if self.target_version:
pyversion = self.target_version
# fix 2.5+ bdist_wininst ignoring --target-version spec
self._bad_upload = ('bdist_wininst', 'any', installer_name)
else:
pyversion = 'any'
self._good_upload = ('bdist_wininst', pyversion, installer_name)
def _fix_upload_names(self):
good, bad = self._good_upload, self._bad_upload
dist_files = getattr(self.distribution, 'dist_files', [])
if bad in dist_files:
dist_files.remove(bad)
if good not in dist_files:
dist_files.append(good)
def reinitialize_command(self, command, reinit_subcommands=0):
cmd = self.distribution.reinitialize_command(
command, reinit_subcommands)
if command in ('install', 'install_lib'):
cmd.install_lib = None # work around distutils bug
return cmd
def run(self):
self._is_running = True
try:
_bdist_wininst.run(self)
self._fix_upload_names()
finally:
self._is_running = False
| from distutils.command.bdist_wininst import bdist_wininst as _bdist_wininst
class bdist_wininst(_bdist_wininst):
def reinitialize_command(self, command, reinit_subcommands=0):
cmd = self.distribution.reinitialize_command(
command, reinit_subcommands)
if command in ('install', 'install_lib'):
cmd.install_lib = None # work around distutils bug
return cmd
def run(self):
self._is_running = True
try:
_bdist_wininst.run(self)
finally:
self._is_running = False
| Remove patching of upload_names (dist_files) - Python 2.6 already does the right thing. | Remove patching of upload_names (dist_files) - Python 2.6 already does the right thing.
| Python | mit | pypa/setuptools,pypa/setuptools,pypa/setuptools | from distutils.command.bdist_wininst import bdist_wininst as _bdist_wininst
class bdist_wininst(_bdist_wininst):
_good_upload = _bad_upload = None
def create_exe(self, arcname, fullname, bitmap=None):
_bdist_wininst.create_exe(self, arcname, fullname, bitmap)
installer_name = self.get_installer_filename(fullname)
if self.target_version:
pyversion = self.target_version
# fix 2.5+ bdist_wininst ignoring --target-version spec
self._bad_upload = ('bdist_wininst', 'any', installer_name)
else:
pyversion = 'any'
self._good_upload = ('bdist_wininst', pyversion, installer_name)
def _fix_upload_names(self):
good, bad = self._good_upload, self._bad_upload
dist_files = getattr(self.distribution, 'dist_files', [])
if bad in dist_files:
dist_files.remove(bad)
if good not in dist_files:
dist_files.append(good)
def reinitialize_command(self, command, reinit_subcommands=0):
cmd = self.distribution.reinitialize_command(
command, reinit_subcommands)
if command in ('install', 'install_lib'):
cmd.install_lib = None # work around distutils bug
return cmd
def run(self):
self._is_running = True
try:
_bdist_wininst.run(self)
self._fix_upload_names()
finally:
self._is_running = False
Remove patching of upload_names (dist_files) - Python 2.6 already does the right thing. | from distutils.command.bdist_wininst import bdist_wininst as _bdist_wininst
class bdist_wininst(_bdist_wininst):
def reinitialize_command(self, command, reinit_subcommands=0):
cmd = self.distribution.reinitialize_command(
command, reinit_subcommands)
if command in ('install', 'install_lib'):
cmd.install_lib = None # work around distutils bug
return cmd
def run(self):
self._is_running = True
try:
_bdist_wininst.run(self)
finally:
self._is_running = False
| <commit_before>from distutils.command.bdist_wininst import bdist_wininst as _bdist_wininst
class bdist_wininst(_bdist_wininst):
_good_upload = _bad_upload = None
def create_exe(self, arcname, fullname, bitmap=None):
_bdist_wininst.create_exe(self, arcname, fullname, bitmap)
installer_name = self.get_installer_filename(fullname)
if self.target_version:
pyversion = self.target_version
# fix 2.5+ bdist_wininst ignoring --target-version spec
self._bad_upload = ('bdist_wininst', 'any', installer_name)
else:
pyversion = 'any'
self._good_upload = ('bdist_wininst', pyversion, installer_name)
def _fix_upload_names(self):
good, bad = self._good_upload, self._bad_upload
dist_files = getattr(self.distribution, 'dist_files', [])
if bad in dist_files:
dist_files.remove(bad)
if good not in dist_files:
dist_files.append(good)
def reinitialize_command(self, command, reinit_subcommands=0):
cmd = self.distribution.reinitialize_command(
command, reinit_subcommands)
if command in ('install', 'install_lib'):
cmd.install_lib = None # work around distutils bug
return cmd
def run(self):
self._is_running = True
try:
_bdist_wininst.run(self)
self._fix_upload_names()
finally:
self._is_running = False
<commit_msg>Remove patching of upload_names (dist_files) - Python 2.6 already does the right thing.<commit_after> | from distutils.command.bdist_wininst import bdist_wininst as _bdist_wininst
class bdist_wininst(_bdist_wininst):
def reinitialize_command(self, command, reinit_subcommands=0):
cmd = self.distribution.reinitialize_command(
command, reinit_subcommands)
if command in ('install', 'install_lib'):
cmd.install_lib = None # work around distutils bug
return cmd
def run(self):
self._is_running = True
try:
_bdist_wininst.run(self)
finally:
self._is_running = False
| from distutils.command.bdist_wininst import bdist_wininst as _bdist_wininst
class bdist_wininst(_bdist_wininst):
_good_upload = _bad_upload = None
def create_exe(self, arcname, fullname, bitmap=None):
_bdist_wininst.create_exe(self, arcname, fullname, bitmap)
installer_name = self.get_installer_filename(fullname)
if self.target_version:
pyversion = self.target_version
# fix 2.5+ bdist_wininst ignoring --target-version spec
self._bad_upload = ('bdist_wininst', 'any', installer_name)
else:
pyversion = 'any'
self._good_upload = ('bdist_wininst', pyversion, installer_name)
def _fix_upload_names(self):
good, bad = self._good_upload, self._bad_upload
dist_files = getattr(self.distribution, 'dist_files', [])
if bad in dist_files:
dist_files.remove(bad)
if good not in dist_files:
dist_files.append(good)
def reinitialize_command(self, command, reinit_subcommands=0):
cmd = self.distribution.reinitialize_command(
command, reinit_subcommands)
if command in ('install', 'install_lib'):
cmd.install_lib = None # work around distutils bug
return cmd
def run(self):
self._is_running = True
try:
_bdist_wininst.run(self)
self._fix_upload_names()
finally:
self._is_running = False
Remove patching of upload_names (dist_files) - Python 2.6 already does the right thing.from distutils.command.bdist_wininst import bdist_wininst as _bdist_wininst
class bdist_wininst(_bdist_wininst):
def reinitialize_command(self, command, reinit_subcommands=0):
cmd = self.distribution.reinitialize_command(
command, reinit_subcommands)
if command in ('install', 'install_lib'):
cmd.install_lib = None # work around distutils bug
return cmd
def run(self):
self._is_running = True
try:
_bdist_wininst.run(self)
finally:
self._is_running = False
| <commit_before>from distutils.command.bdist_wininst import bdist_wininst as _bdist_wininst
class bdist_wininst(_bdist_wininst):
_good_upload = _bad_upload = None
def create_exe(self, arcname, fullname, bitmap=None):
_bdist_wininst.create_exe(self, arcname, fullname, bitmap)
installer_name = self.get_installer_filename(fullname)
if self.target_version:
pyversion = self.target_version
# fix 2.5+ bdist_wininst ignoring --target-version spec
self._bad_upload = ('bdist_wininst', 'any', installer_name)
else:
pyversion = 'any'
self._good_upload = ('bdist_wininst', pyversion, installer_name)
def _fix_upload_names(self):
good, bad = self._good_upload, self._bad_upload
dist_files = getattr(self.distribution, 'dist_files', [])
if bad in dist_files:
dist_files.remove(bad)
if good not in dist_files:
dist_files.append(good)
def reinitialize_command(self, command, reinit_subcommands=0):
cmd = self.distribution.reinitialize_command(
command, reinit_subcommands)
if command in ('install', 'install_lib'):
cmd.install_lib = None # work around distutils bug
return cmd
def run(self):
self._is_running = True
try:
_bdist_wininst.run(self)
self._fix_upload_names()
finally:
self._is_running = False
<commit_msg>Remove patching of upload_names (dist_files) - Python 2.6 already does the right thing.<commit_after>from distutils.command.bdist_wininst import bdist_wininst as _bdist_wininst
class bdist_wininst(_bdist_wininst):
def reinitialize_command(self, command, reinit_subcommands=0):
cmd = self.distribution.reinitialize_command(
command, reinit_subcommands)
if command in ('install', 'install_lib'):
cmd.install_lib = None # work around distutils bug
return cmd
def run(self):
self._is_running = True
try:
_bdist_wininst.run(self)
finally:
self._is_running = False
|
ec4b2fc266eb033dab9319c4d2f8ece6fd23170a | src/start_scraping.py | src/start_scraping.py | from main import initiate_shame
# Testing this
initiate_shame(1141922, 2016)
| from main import initiate_shame
initiate_shame(1141922, 2017)
initiate_shame(144768, 2017)
| Update script file for the season | Update script file for the season
| Python | mit | troym9731/fantasy_football | from main import initiate_shame
# Testing this
initiate_shame(1141922, 2016)
Update script file for the season | from main import initiate_shame
initiate_shame(1141922, 2017)
initiate_shame(144768, 2017)
| <commit_before>from main import initiate_shame
# Testing this
initiate_shame(1141922, 2016)
<commit_msg>Update script file for the season<commit_after> | from main import initiate_shame
initiate_shame(1141922, 2017)
initiate_shame(144768, 2017)
| from main import initiate_shame
# Testing this
initiate_shame(1141922, 2016)
Update script file for the seasonfrom main import initiate_shame
initiate_shame(1141922, 2017)
initiate_shame(144768, 2017)
| <commit_before>from main import initiate_shame
# Testing this
initiate_shame(1141922, 2016)
<commit_msg>Update script file for the season<commit_after>from main import initiate_shame
initiate_shame(1141922, 2017)
initiate_shame(144768, 2017)
|
ffd4c59f4916087eac0977355a638508757c80fd | taskmonitor/models.py | taskmonitor/models.py | from celery import states
from django.db import models
STATES_CHOICES = zip(states.ALL_STATES, states.ALL_STATES)
class TaskStatus(models.Model):
"""
Task status.
With this the status of celery tasks can be monitored, more reliably than
depending on the broker or celery itself.
"""
status = models.CharField(max_length=20, default=states.PENDING, choices=STATES_CHOICES, db_index=True)
task_id = models.CharField(max_length=50, unique=True, blank=True, null=True, db_index=True)
signature = models.CharField(max_length=255, db_index=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
expires_at = models.DateTimeField(null=True)
def __unicode__(self):
return unicode('%s | %s | %s ' % (
self.created_at.strftime('%Y-%m-%d %H:%M:%S'),
self.status,
self.signature
))
class Meta:
app_label = 'taskmonitor'
verbose_name_plural = 'Task statuses'
| from celery import states
from django.db import models
ALL_STATES = sorted(states.ALL_STATES)
STATES_CHOICES = zip(ALL_STATES, ALL_STATES)
class TaskStatus(models.Model):
"""
Task status.
With this the status of celery tasks can be monitored, more reliably than
depending on the broker or celery itself.
"""
status = models.CharField(max_length=20, default=states.PENDING, choices=STATES_CHOICES, db_index=True)
task_id = models.CharField(max_length=50, unique=True, blank=True, null=True, db_index=True)
signature = models.CharField(max_length=255, db_index=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
expires_at = models.DateTimeField(null=True)
def __unicode__(self):
return unicode('%s | %s | %s ' % (
self.created_at.strftime('%Y-%m-%d %H:%M:%S'),
self.status,
self.signature
))
class Meta:
app_label = 'taskmonitor'
verbose_name_plural = 'Task statuses'
| Fix neverending creation of migrations on heroku | Fix neverending creation of migrations on heroku
| Python | agpl-3.0 | HelloLily/hellolily,HelloLily/hellolily,HelloLily/hellolily,HelloLily/hellolily | from celery import states
from django.db import models
STATES_CHOICES = zip(states.ALL_STATES, states.ALL_STATES)
class TaskStatus(models.Model):
"""
Task status.
With this the status of celery tasks can be monitored, more reliably than
depending on the broker or celery itself.
"""
status = models.CharField(max_length=20, default=states.PENDING, choices=STATES_CHOICES, db_index=True)
task_id = models.CharField(max_length=50, unique=True, blank=True, null=True, db_index=True)
signature = models.CharField(max_length=255, db_index=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
expires_at = models.DateTimeField(null=True)
def __unicode__(self):
return unicode('%s | %s | %s ' % (
self.created_at.strftime('%Y-%m-%d %H:%M:%S'),
self.status,
self.signature
))
class Meta:
app_label = 'taskmonitor'
verbose_name_plural = 'Task statuses'
Fix neverending creation of migrations on heroku | from celery import states
from django.db import models
ALL_STATES = sorted(states.ALL_STATES)
STATES_CHOICES = zip(ALL_STATES, ALL_STATES)
class TaskStatus(models.Model):
"""
Task status.
With this the status of celery tasks can be monitored, more reliably than
depending on the broker or celery itself.
"""
status = models.CharField(max_length=20, default=states.PENDING, choices=STATES_CHOICES, db_index=True)
task_id = models.CharField(max_length=50, unique=True, blank=True, null=True, db_index=True)
signature = models.CharField(max_length=255, db_index=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
expires_at = models.DateTimeField(null=True)
def __unicode__(self):
return unicode('%s | %s | %s ' % (
self.created_at.strftime('%Y-%m-%d %H:%M:%S'),
self.status,
self.signature
))
class Meta:
app_label = 'taskmonitor'
verbose_name_plural = 'Task statuses'
| <commit_before>from celery import states
from django.db import models
STATES_CHOICES = zip(states.ALL_STATES, states.ALL_STATES)
class TaskStatus(models.Model):
"""
Task status.
With this the status of celery tasks can be monitored, more reliably than
depending on the broker or celery itself.
"""
status = models.CharField(max_length=20, default=states.PENDING, choices=STATES_CHOICES, db_index=True)
task_id = models.CharField(max_length=50, unique=True, blank=True, null=True, db_index=True)
signature = models.CharField(max_length=255, db_index=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
expires_at = models.DateTimeField(null=True)
def __unicode__(self):
return unicode('%s | %s | %s ' % (
self.created_at.strftime('%Y-%m-%d %H:%M:%S'),
self.status,
self.signature
))
class Meta:
app_label = 'taskmonitor'
verbose_name_plural = 'Task statuses'
<commit_msg>Fix neverending creation of migrations on heroku<commit_after> | from celery import states
from django.db import models
ALL_STATES = sorted(states.ALL_STATES)
STATES_CHOICES = zip(ALL_STATES, ALL_STATES)
class TaskStatus(models.Model):
"""
Task status.
With this the status of celery tasks can be monitored, more reliably than
depending on the broker or celery itself.
"""
status = models.CharField(max_length=20, default=states.PENDING, choices=STATES_CHOICES, db_index=True)
task_id = models.CharField(max_length=50, unique=True, blank=True, null=True, db_index=True)
signature = models.CharField(max_length=255, db_index=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
expires_at = models.DateTimeField(null=True)
def __unicode__(self):
return unicode('%s | %s | %s ' % (
self.created_at.strftime('%Y-%m-%d %H:%M:%S'),
self.status,
self.signature
))
class Meta:
app_label = 'taskmonitor'
verbose_name_plural = 'Task statuses'
| from celery import states
from django.db import models
STATES_CHOICES = zip(states.ALL_STATES, states.ALL_STATES)
class TaskStatus(models.Model):
"""
Task status.
With this the status of celery tasks can be monitored, more reliably than
depending on the broker or celery itself.
"""
status = models.CharField(max_length=20, default=states.PENDING, choices=STATES_CHOICES, db_index=True)
task_id = models.CharField(max_length=50, unique=True, blank=True, null=True, db_index=True)
signature = models.CharField(max_length=255, db_index=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
expires_at = models.DateTimeField(null=True)
def __unicode__(self):
return unicode('%s | %s | %s ' % (
self.created_at.strftime('%Y-%m-%d %H:%M:%S'),
self.status,
self.signature
))
class Meta:
app_label = 'taskmonitor'
verbose_name_plural = 'Task statuses'
Fix neverending creation of migrations on herokufrom celery import states
from django.db import models
ALL_STATES = sorted(states.ALL_STATES)
STATES_CHOICES = zip(ALL_STATES, ALL_STATES)
class TaskStatus(models.Model):
"""
Task status.
With this the status of celery tasks can be monitored, more reliably than
depending on the broker or celery itself.
"""
status = models.CharField(max_length=20, default=states.PENDING, choices=STATES_CHOICES, db_index=True)
task_id = models.CharField(max_length=50, unique=True, blank=True, null=True, db_index=True)
signature = models.CharField(max_length=255, db_index=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
expires_at = models.DateTimeField(null=True)
def __unicode__(self):
return unicode('%s | %s | %s ' % (
self.created_at.strftime('%Y-%m-%d %H:%M:%S'),
self.status,
self.signature
))
class Meta:
app_label = 'taskmonitor'
verbose_name_plural = 'Task statuses'
| <commit_before>from celery import states
from django.db import models
STATES_CHOICES = zip(states.ALL_STATES, states.ALL_STATES)
class TaskStatus(models.Model):
"""
Task status.
With this the status of celery tasks can be monitored, more reliably than
depending on the broker or celery itself.
"""
status = models.CharField(max_length=20, default=states.PENDING, choices=STATES_CHOICES, db_index=True)
task_id = models.CharField(max_length=50, unique=True, blank=True, null=True, db_index=True)
signature = models.CharField(max_length=255, db_index=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
expires_at = models.DateTimeField(null=True)
def __unicode__(self):
return unicode('%s | %s | %s ' % (
self.created_at.strftime('%Y-%m-%d %H:%M:%S'),
self.status,
self.signature
))
class Meta:
app_label = 'taskmonitor'
verbose_name_plural = 'Task statuses'
<commit_msg>Fix neverending creation of migrations on heroku<commit_after>from celery import states
from django.db import models
ALL_STATES = sorted(states.ALL_STATES)
STATES_CHOICES = zip(ALL_STATES, ALL_STATES)
class TaskStatus(models.Model):
"""
Task status.
With this the status of celery tasks can be monitored, more reliably than
depending on the broker or celery itself.
"""
status = models.CharField(max_length=20, default=states.PENDING, choices=STATES_CHOICES, db_index=True)
task_id = models.CharField(max_length=50, unique=True, blank=True, null=True, db_index=True)
signature = models.CharField(max_length=255, db_index=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
expires_at = models.DateTimeField(null=True)
def __unicode__(self):
return unicode('%s | %s | %s ' % (
self.created_at.strftime('%Y-%m-%d %H:%M:%S'),
self.status,
self.signature
))
class Meta:
app_label = 'taskmonitor'
verbose_name_plural = 'Task statuses'
|
227d4c152367292e8b0b8801d9ce6179af92432a | python/014_longest_common_prefix.py | python/014_longest_common_prefix.py | """
Write a function to find the longest common prefix string amongst an array of strings.
"""
class Solution(object):
def longestCommonPrefix(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
if len(strs)==0:
return ""
lcp=list(strs[0])
for i,string in enumerate(strs):
if list(string[0:len(lcp)])==lcp:
continue
else:
while len(lcp)>0 and list(string[0:len(lcp)])!=lcp:
lcp.pop()
if lcp==0:
return ""
return "".join(lcp)
| """
Write a function to find the longest common prefix string amongst an array of strings.
"""
class Solution(object):
def longestCommonPrefix(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
if strs is None or strs == "":
return ""
lcp = list(strs[0])
for i, string in enumerate(strs):
if list(string[0:len(lcp)]) == lcp:
continue
else:
while len(lcp) > 0 and list(string[0:len(lcp)]) != lcp:
lcp.pop()
if lcp == 0:
return ""
return "".join(lcp)
a = Solution()
print(a.longestCommonPrefix(["apps","apple","append"]) == "app")
| Add test case to 014 | Add test case to 014
| Python | mit | ufjfeng/leetcode-jf-soln,ufjfeng/leetcode-jf-soln | """
Write a function to find the longest common prefix string amongst an array of strings.
"""
class Solution(object):
def longestCommonPrefix(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
if len(strs)==0:
return ""
lcp=list(strs[0])
for i,string in enumerate(strs):
if list(string[0:len(lcp)])==lcp:
continue
else:
while len(lcp)>0 and list(string[0:len(lcp)])!=lcp:
lcp.pop()
if lcp==0:
return ""
return "".join(lcp)
Add test case to 014 | """
Write a function to find the longest common prefix string amongst an array of strings.
"""
class Solution(object):
def longestCommonPrefix(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
if strs is None or strs == "":
return ""
lcp = list(strs[0])
for i, string in enumerate(strs):
if list(string[0:len(lcp)]) == lcp:
continue
else:
while len(lcp) > 0 and list(string[0:len(lcp)]) != lcp:
lcp.pop()
if lcp == 0:
return ""
return "".join(lcp)
a = Solution()
print(a.longestCommonPrefix(["apps","apple","append"]) == "app")
| <commit_before>"""
Write a function to find the longest common prefix string amongst an array of strings.
"""
class Solution(object):
def longestCommonPrefix(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
if len(strs)==0:
return ""
lcp=list(strs[0])
for i,string in enumerate(strs):
if list(string[0:len(lcp)])==lcp:
continue
else:
while len(lcp)>0 and list(string[0:len(lcp)])!=lcp:
lcp.pop()
if lcp==0:
return ""
return "".join(lcp)
<commit_msg>Add test case to 014<commit_after> | """
Write a function to find the longest common prefix string amongst an array of strings.
"""
class Solution(object):
def longestCommonPrefix(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
if strs is None or strs == "":
return ""
lcp = list(strs[0])
for i, string in enumerate(strs):
if list(string[0:len(lcp)]) == lcp:
continue
else:
while len(lcp) > 0 and list(string[0:len(lcp)]) != lcp:
lcp.pop()
if lcp == 0:
return ""
return "".join(lcp)
a = Solution()
print(a.longestCommonPrefix(["apps","apple","append"]) == "app")
| """
Write a function to find the longest common prefix string amongst an array of strings.
"""
class Solution(object):
def longestCommonPrefix(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
if len(strs)==0:
return ""
lcp=list(strs[0])
for i,string in enumerate(strs):
if list(string[0:len(lcp)])==lcp:
continue
else:
while len(lcp)>0 and list(string[0:len(lcp)])!=lcp:
lcp.pop()
if lcp==0:
return ""
return "".join(lcp)
Add test case to 014"""
Write a function to find the longest common prefix string amongst an array of strings.
"""
class Solution(object):
def longestCommonPrefix(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
if strs is None or strs == "":
return ""
lcp = list(strs[0])
for i, string in enumerate(strs):
if list(string[0:len(lcp)]) == lcp:
continue
else:
while len(lcp) > 0 and list(string[0:len(lcp)]) != lcp:
lcp.pop()
if lcp == 0:
return ""
return "".join(lcp)
a = Solution()
print(a.longestCommonPrefix(["apps","apple","append"]) == "app")
| <commit_before>"""
Write a function to find the longest common prefix string amongst an array of strings.
"""
class Solution(object):
def longestCommonPrefix(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
if len(strs)==0:
return ""
lcp=list(strs[0])
for i,string in enumerate(strs):
if list(string[0:len(lcp)])==lcp:
continue
else:
while len(lcp)>0 and list(string[0:len(lcp)])!=lcp:
lcp.pop()
if lcp==0:
return ""
return "".join(lcp)
<commit_msg>Add test case to 014<commit_after>"""
Write a function to find the longest common prefix string amongst an array of strings.
"""
class Solution(object):
def longestCommonPrefix(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
if strs is None or strs == "":
return ""
lcp = list(strs[0])
for i, string in enumerate(strs):
if list(string[0:len(lcp)]) == lcp:
continue
else:
while len(lcp) > 0 and list(string[0:len(lcp)]) != lcp:
lcp.pop()
if lcp == 0:
return ""
return "".join(lcp)
a = Solution()
print(a.longestCommonPrefix(["apps","apple","append"]) == "app")
|
d7c293fb430c31c237e3aca7ba469f0237b18d8d | scikits/talkbox/linpred/__init__.py | scikits/talkbox/linpred/__init__.py | import levinson_lpc
from levinson_lpc import *
__all__ = levinson_lpc.__all__
| import levinson_lpc
from levinson_lpc import *
__all__ = levinson_lpc.__all__
from common import lpcres
__all__ += ['lpcres']
| Add lpcres in linpred namespace. | Add lpcres in linpred namespace.
| Python | mit | cournape/talkbox,cournape/talkbox | import levinson_lpc
from levinson_lpc import *
__all__ = levinson_lpc.__all__
Add lpcres in linpred namespace. | import levinson_lpc
from levinson_lpc import *
__all__ = levinson_lpc.__all__
from common import lpcres
__all__ += ['lpcres']
| <commit_before>import levinson_lpc
from levinson_lpc import *
__all__ = levinson_lpc.__all__
<commit_msg>Add lpcres in linpred namespace.<commit_after> | import levinson_lpc
from levinson_lpc import *
__all__ = levinson_lpc.__all__
from common import lpcres
__all__ += ['lpcres']
| import levinson_lpc
from levinson_lpc import *
__all__ = levinson_lpc.__all__
Add lpcres in linpred namespace.import levinson_lpc
from levinson_lpc import *
__all__ = levinson_lpc.__all__
from common import lpcres
__all__ += ['lpcres']
| <commit_before>import levinson_lpc
from levinson_lpc import *
__all__ = levinson_lpc.__all__
<commit_msg>Add lpcres in linpred namespace.<commit_after>import levinson_lpc
from levinson_lpc import *
__all__ = levinson_lpc.__all__
from common import lpcres
__all__ += ['lpcres']
|
674fa7692c71524541d8797a65968e5e605454e7 | testrail/suite.py | testrail/suite.py | from datetime import datetime
import api
from project import Project
class Suite(object):
def __init__(self, content):
self._content = content
self.api = api.API()
@property
def id(self):
return self._content.get('id')
@property
def completed_on(self):
try:
return datetime.fromtimestamp(
int(self._content.get('completed_on')))
except TypeError:
return None
@property
def description(self):
return self._content.get('description')
@property
def is_baseline(self):
return self._content.get('is_baseline')
@property
def is_completed(self):
return self._content.get('is_completed')
@property
def is_master(self):
return self._content.get('is_master')
@property
def name(self):
return self._content.get('name')
@property
def project(self):
return Project(
self.api.project_with_id(self._content.get('project_id')))
@property
def url(self):
return self._content.get('url')
| from datetime import datetime
import api
from helper import TestRailError
from project import Project
class Suite(object):
def __init__(self, content):
self._content = content
self.api = api.API()
@property
def id(self):
return self._content.get('id')
@property
def completed_on(self):
try:
return datetime.fromtimestamp(
int(self._content.get('completed_on')))
except TypeError:
return None
@property
def description(self):
return self._content.get('description')
@description.setter
def description(self, value):
if type(value) != str:
raise TestRailError('input must be a string')
self._content['description'] = value
@property
def is_baseline(self):
return self._content.get('is_baseline')
@property
def is_completed(self):
return self._content.get('is_completed')
@property
def is_master(self):
return self._content.get('is_master')
@property
def name(self):
return self._content.get('name')
@name.setter
def name(self, value):
if type(value) != str:
raise TestRailError('input must be a string')
self._content['name'] = value
@property
def project(self):
return Project(
self.api.project_with_id(self._content.get('project_id')))
@project.setter
def project(self, value):
if type(value) != Project:
raise TestRailError('input must be a Project')
self.api.project_with_id(value.id) # verify project is valid
self._content['project_id'] = value.id
@property
def url(self):
return self._content.get('url')
def raw_data(self):
return self._content
| Add setters for project, name, and description. | Add setters for project, name, and description.
| Python | mit | travispavek/testrail-python,travispavek/testrail | from datetime import datetime
import api
from project import Project
class Suite(object):
def __init__(self, content):
self._content = content
self.api = api.API()
@property
def id(self):
return self._content.get('id')
@property
def completed_on(self):
try:
return datetime.fromtimestamp(
int(self._content.get('completed_on')))
except TypeError:
return None
@property
def description(self):
return self._content.get('description')
@property
def is_baseline(self):
return self._content.get('is_baseline')
@property
def is_completed(self):
return self._content.get('is_completed')
@property
def is_master(self):
return self._content.get('is_master')
@property
def name(self):
return self._content.get('name')
@property
def project(self):
return Project(
self.api.project_with_id(self._content.get('project_id')))
@property
def url(self):
return self._content.get('url')
Add setters for project, name, and description. | from datetime import datetime
import api
from helper import TestRailError
from project import Project
class Suite(object):
def __init__(self, content):
self._content = content
self.api = api.API()
@property
def id(self):
return self._content.get('id')
@property
def completed_on(self):
try:
return datetime.fromtimestamp(
int(self._content.get('completed_on')))
except TypeError:
return None
@property
def description(self):
return self._content.get('description')
@description.setter
def description(self, value):
if type(value) != str:
raise TestRailError('input must be a string')
self._content['description'] = value
@property
def is_baseline(self):
return self._content.get('is_baseline')
@property
def is_completed(self):
return self._content.get('is_completed')
@property
def is_master(self):
return self._content.get('is_master')
@property
def name(self):
return self._content.get('name')
@name.setter
def name(self, value):
if type(value) != str:
raise TestRailError('input must be a string')
self._content['name'] = value
@property
def project(self):
return Project(
self.api.project_with_id(self._content.get('project_id')))
@project.setter
def project(self, value):
if type(value) != Project:
raise TestRailError('input must be a Project')
self.api.project_with_id(value.id) # verify project is valid
self._content['project_id'] = value.id
@property
def url(self):
return self._content.get('url')
def raw_data(self):
return self._content
| <commit_before>from datetime import datetime
import api
from project import Project
class Suite(object):
def __init__(self, content):
self._content = content
self.api = api.API()
@property
def id(self):
return self._content.get('id')
@property
def completed_on(self):
try:
return datetime.fromtimestamp(
int(self._content.get('completed_on')))
except TypeError:
return None
@property
def description(self):
return self._content.get('description')
@property
def is_baseline(self):
return self._content.get('is_baseline')
@property
def is_completed(self):
return self._content.get('is_completed')
@property
def is_master(self):
return self._content.get('is_master')
@property
def name(self):
return self._content.get('name')
@property
def project(self):
return Project(
self.api.project_with_id(self._content.get('project_id')))
@property
def url(self):
return self._content.get('url')
<commit_msg>Add setters for project, name, and description.<commit_after> | from datetime import datetime
import api
from helper import TestRailError
from project import Project
class Suite(object):
def __init__(self, content):
self._content = content
self.api = api.API()
@property
def id(self):
return self._content.get('id')
@property
def completed_on(self):
try:
return datetime.fromtimestamp(
int(self._content.get('completed_on')))
except TypeError:
return None
@property
def description(self):
return self._content.get('description')
@description.setter
def description(self, value):
if type(value) != str:
raise TestRailError('input must be a string')
self._content['description'] = value
@property
def is_baseline(self):
return self._content.get('is_baseline')
@property
def is_completed(self):
return self._content.get('is_completed')
@property
def is_master(self):
return self._content.get('is_master')
@property
def name(self):
return self._content.get('name')
@name.setter
def name(self, value):
if type(value) != str:
raise TestRailError('input must be a string')
self._content['name'] = value
@property
def project(self):
return Project(
self.api.project_with_id(self._content.get('project_id')))
@project.setter
def project(self, value):
if type(value) != Project:
raise TestRailError('input must be a Project')
self.api.project_with_id(value.id) # verify project is valid
self._content['project_id'] = value.id
@property
def url(self):
return self._content.get('url')
def raw_data(self):
return self._content
| from datetime import datetime
import api
from project import Project
class Suite(object):
def __init__(self, content):
self._content = content
self.api = api.API()
@property
def id(self):
return self._content.get('id')
@property
def completed_on(self):
try:
return datetime.fromtimestamp(
int(self._content.get('completed_on')))
except TypeError:
return None
@property
def description(self):
return self._content.get('description')
@property
def is_baseline(self):
return self._content.get('is_baseline')
@property
def is_completed(self):
return self._content.get('is_completed')
@property
def is_master(self):
return self._content.get('is_master')
@property
def name(self):
return self._content.get('name')
@property
def project(self):
return Project(
self.api.project_with_id(self._content.get('project_id')))
@property
def url(self):
return self._content.get('url')
Add setters for project, name, and description.from datetime import datetime
import api
from helper import TestRailError
from project import Project
class Suite(object):
def __init__(self, content):
self._content = content
self.api = api.API()
@property
def id(self):
return self._content.get('id')
@property
def completed_on(self):
try:
return datetime.fromtimestamp(
int(self._content.get('completed_on')))
except TypeError:
return None
@property
def description(self):
return self._content.get('description')
@description.setter
def description(self, value):
if type(value) != str:
raise TestRailError('input must be a string')
self._content['description'] = value
@property
def is_baseline(self):
return self._content.get('is_baseline')
@property
def is_completed(self):
return self._content.get('is_completed')
@property
def is_master(self):
return self._content.get('is_master')
@property
def name(self):
return self._content.get('name')
@name.setter
def name(self, value):
if type(value) != str:
raise TestRailError('input must be a string')
self._content['name'] = value
@property
def project(self):
return Project(
self.api.project_with_id(self._content.get('project_id')))
@project.setter
def project(self, value):
if type(value) != Project:
raise TestRailError('input must be a Project')
self.api.project_with_id(value.id) # verify project is valid
self._content['project_id'] = value.id
@property
def url(self):
return self._content.get('url')
def raw_data(self):
return self._content
| <commit_before>from datetime import datetime
import api
from project import Project
class Suite(object):
def __init__(self, content):
self._content = content
self.api = api.API()
@property
def id(self):
return self._content.get('id')
@property
def completed_on(self):
try:
return datetime.fromtimestamp(
int(self._content.get('completed_on')))
except TypeError:
return None
@property
def description(self):
return self._content.get('description')
@property
def is_baseline(self):
return self._content.get('is_baseline')
@property
def is_completed(self):
return self._content.get('is_completed')
@property
def is_master(self):
return self._content.get('is_master')
@property
def name(self):
return self._content.get('name')
@property
def project(self):
return Project(
self.api.project_with_id(self._content.get('project_id')))
@property
def url(self):
return self._content.get('url')
<commit_msg>Add setters for project, name, and description.<commit_after>from datetime import datetime
import api
from helper import TestRailError
from project import Project
class Suite(object):
def __init__(self, content):
self._content = content
self.api = api.API()
@property
def id(self):
return self._content.get('id')
@property
def completed_on(self):
try:
return datetime.fromtimestamp(
int(self._content.get('completed_on')))
except TypeError:
return None
@property
def description(self):
return self._content.get('description')
@description.setter
def description(self, value):
if type(value) != str:
raise TestRailError('input must be a string')
self._content['description'] = value
@property
def is_baseline(self):
return self._content.get('is_baseline')
@property
def is_completed(self):
return self._content.get('is_completed')
@property
def is_master(self):
return self._content.get('is_master')
@property
def name(self):
return self._content.get('name')
@name.setter
def name(self, value):
if type(value) != str:
raise TestRailError('input must be a string')
self._content['name'] = value
@property
def project(self):
return Project(
self.api.project_with_id(self._content.get('project_id')))
@project.setter
def project(self, value):
if type(value) != Project:
raise TestRailError('input must be a Project')
self.api.project_with_id(value.id) # verify project is valid
self._content['project_id'] = value.id
@property
def url(self):
return self._content.get('url')
def raw_data(self):
return self._content
|
e704d8cb63e76bb1f5b1da6fec7ae4f65d7710f1 | tests/__init__.py | tests/__init__.py | import sys
try:
# noinspection PyPackageRequirements
import unittest2 as unittest
sys.modules['unittest'] = unittest
except ImportError:
import unittest
from goless.backends import current as be
class BaseTests(unittest.TestCase):
"""
Base class for unit tests.
Yields in setup and teardown so no lingering tasklets
are run in a later test,
potentially causing an error that would leave people scratching their heads.
"""
def setUp(self):
be.yield_()
def tearDown(self):
be.yield_()
self.assertTrue(be.would_deadlock())
| import sys
try:
# noinspection PyPackageRequirements
import unittest2 as unittest
sys.modules['unittest'] = unittest
except ImportError:
import unittest
from goless.backends import current as be
class BaseTests(unittest.TestCase):
"""
Base class for unit tests.
Yields in setup and teardown so no lingering tasklets
are run in a later test,
potentially causing an error that would leave people scratching their heads.
"""
def setUp(self):
be.yield_()
def tearDown(self):
be.yield_()
self.assertTrue(be.would_deadlock(), 'Task(s) still running after test finished')
| Add comment in BaseTests tearDown | Add comment in BaseTests tearDown
| Python | apache-2.0 | rgalanakis/goless,rgalanakis/goless | import sys
try:
# noinspection PyPackageRequirements
import unittest2 as unittest
sys.modules['unittest'] = unittest
except ImportError:
import unittest
from goless.backends import current as be
class BaseTests(unittest.TestCase):
"""
Base class for unit tests.
Yields in setup and teardown so no lingering tasklets
are run in a later test,
potentially causing an error that would leave people scratching their heads.
"""
def setUp(self):
be.yield_()
def tearDown(self):
be.yield_()
self.assertTrue(be.would_deadlock())
Add comment in BaseTests tearDown | import sys
try:
# noinspection PyPackageRequirements
import unittest2 as unittest
sys.modules['unittest'] = unittest
except ImportError:
import unittest
from goless.backends import current as be
class BaseTests(unittest.TestCase):
"""
Base class for unit tests.
Yields in setup and teardown so no lingering tasklets
are run in a later test,
potentially causing an error that would leave people scratching their heads.
"""
def setUp(self):
be.yield_()
def tearDown(self):
be.yield_()
self.assertTrue(be.would_deadlock(), 'Task(s) still running after test finished')
| <commit_before>import sys
try:
# noinspection PyPackageRequirements
import unittest2 as unittest
sys.modules['unittest'] = unittest
except ImportError:
import unittest
from goless.backends import current as be
class BaseTests(unittest.TestCase):
"""
Base class for unit tests.
Yields in setup and teardown so no lingering tasklets
are run in a later test,
potentially causing an error that would leave people scratching their heads.
"""
def setUp(self):
be.yield_()
def tearDown(self):
be.yield_()
self.assertTrue(be.would_deadlock())
<commit_msg>Add comment in BaseTests tearDown<commit_after> | import sys
try:
# noinspection PyPackageRequirements
import unittest2 as unittest
sys.modules['unittest'] = unittest
except ImportError:
import unittest
from goless.backends import current as be
class BaseTests(unittest.TestCase):
"""
Base class for unit tests.
Yields in setup and teardown so no lingering tasklets
are run in a later test,
potentially causing an error that would leave people scratching their heads.
"""
def setUp(self):
be.yield_()
def tearDown(self):
be.yield_()
self.assertTrue(be.would_deadlock(), 'Task(s) still running after test finished')
| import sys
try:
# noinspection PyPackageRequirements
import unittest2 as unittest
sys.modules['unittest'] = unittest
except ImportError:
import unittest
from goless.backends import current as be
class BaseTests(unittest.TestCase):
"""
Base class for unit tests.
Yields in setup and teardown so no lingering tasklets
are run in a later test,
potentially causing an error that would leave people scratching their heads.
"""
def setUp(self):
be.yield_()
def tearDown(self):
be.yield_()
self.assertTrue(be.would_deadlock())
Add comment in BaseTests tearDownimport sys
try:
# noinspection PyPackageRequirements
import unittest2 as unittest
sys.modules['unittest'] = unittest
except ImportError:
import unittest
from goless.backends import current as be
class BaseTests(unittest.TestCase):
"""
Base class for unit tests.
Yields in setup and teardown so no lingering tasklets
are run in a later test,
potentially causing an error that would leave people scratching their heads.
"""
def setUp(self):
be.yield_()
def tearDown(self):
be.yield_()
self.assertTrue(be.would_deadlock(), 'Task(s) still running after test finished')
| <commit_before>import sys
try:
# noinspection PyPackageRequirements
import unittest2 as unittest
sys.modules['unittest'] = unittest
except ImportError:
import unittest
from goless.backends import current as be
class BaseTests(unittest.TestCase):
"""
Base class for unit tests.
Yields in setup and teardown so no lingering tasklets
are run in a later test,
potentially causing an error that would leave people scratching their heads.
"""
def setUp(self):
be.yield_()
def tearDown(self):
be.yield_()
self.assertTrue(be.would_deadlock())
<commit_msg>Add comment in BaseTests tearDown<commit_after>import sys
try:
# noinspection PyPackageRequirements
import unittest2 as unittest
sys.modules['unittest'] = unittest
except ImportError:
import unittest
from goless.backends import current as be
class BaseTests(unittest.TestCase):
"""
Base class for unit tests.
Yields in setup and teardown so no lingering tasklets
are run in a later test,
potentially causing an error that would leave people scratching their heads.
"""
def setUp(self):
be.yield_()
def tearDown(self):
be.yield_()
self.assertTrue(be.would_deadlock(), 'Task(s) still running after test finished')
|
4d38b5e391e222c6da371bb100f0bd84c33e3435 | tests/test_cli.py | tests/test_cli.py | import unittest
from stun import cli
class TestCLI(unittest.TestCase):
"""Test the CLI API."""
def test_cli_parser(self):
cli.make_argument_parser()
# TODO: Verify default arguments
# TODO: Verify user arguments
if __name__ == '__main__':
unittest.main()
| import unittest
from stun import cli
class TestCLI(unittest.TestCase):
"""Test the CLI API."""
def test_cli_parser_default(self):
parser = cli.make_argument_parser()
options = parser.parse_args([])
self.assertEqual(options.source_ip, cli.DEFAULTS['source_ip'])
self.assertEqual(options.source_port, cli.DEFAULTS['source_port'])
self.assertEqual(options.stun_port, cli.DEFAULTS['stun_port'])
# TODO: Verify user arguments
if __name__ == '__main__':
unittest.main()
| Verify default arguments in parser. | Verify default arguments in parser.
| Python | mit | b1naryth1ef/pystun,jtriley/pystun | import unittest
from stun import cli
class TestCLI(unittest.TestCase):
"""Test the CLI API."""
def test_cli_parser(self):
cli.make_argument_parser()
# TODO: Verify default arguments
# TODO: Verify user arguments
if __name__ == '__main__':
unittest.main()
Verify default arguments in parser. | import unittest
from stun import cli
class TestCLI(unittest.TestCase):
"""Test the CLI API."""
def test_cli_parser_default(self):
parser = cli.make_argument_parser()
options = parser.parse_args([])
self.assertEqual(options.source_ip, cli.DEFAULTS['source_ip'])
self.assertEqual(options.source_port, cli.DEFAULTS['source_port'])
self.assertEqual(options.stun_port, cli.DEFAULTS['stun_port'])
# TODO: Verify user arguments
if __name__ == '__main__':
unittest.main()
| <commit_before>import unittest
from stun import cli
class TestCLI(unittest.TestCase):
"""Test the CLI API."""
def test_cli_parser(self):
cli.make_argument_parser()
# TODO: Verify default arguments
# TODO: Verify user arguments
if __name__ == '__main__':
unittest.main()
<commit_msg>Verify default arguments in parser.<commit_after> | import unittest
from stun import cli
class TestCLI(unittest.TestCase):
"""Test the CLI API."""
def test_cli_parser_default(self):
parser = cli.make_argument_parser()
options = parser.parse_args([])
self.assertEqual(options.source_ip, cli.DEFAULTS['source_ip'])
self.assertEqual(options.source_port, cli.DEFAULTS['source_port'])
self.assertEqual(options.stun_port, cli.DEFAULTS['stun_port'])
# TODO: Verify user arguments
if __name__ == '__main__':
unittest.main()
| import unittest
from stun import cli
class TestCLI(unittest.TestCase):
"""Test the CLI API."""
def test_cli_parser(self):
cli.make_argument_parser()
# TODO: Verify default arguments
# TODO: Verify user arguments
if __name__ == '__main__':
unittest.main()
Verify default arguments in parser.import unittest
from stun import cli
class TestCLI(unittest.TestCase):
"""Test the CLI API."""
def test_cli_parser_default(self):
parser = cli.make_argument_parser()
options = parser.parse_args([])
self.assertEqual(options.source_ip, cli.DEFAULTS['source_ip'])
self.assertEqual(options.source_port, cli.DEFAULTS['source_port'])
self.assertEqual(options.stun_port, cli.DEFAULTS['stun_port'])
# TODO: Verify user arguments
if __name__ == '__main__':
unittest.main()
| <commit_before>import unittest
from stun import cli
class TestCLI(unittest.TestCase):
"""Test the CLI API."""
def test_cli_parser(self):
cli.make_argument_parser()
# TODO: Verify default arguments
# TODO: Verify user arguments
if __name__ == '__main__':
unittest.main()
<commit_msg>Verify default arguments in parser.<commit_after>import unittest
from stun import cli
class TestCLI(unittest.TestCase):
"""Test the CLI API."""
def test_cli_parser_default(self):
parser = cli.make_argument_parser()
options = parser.parse_args([])
self.assertEqual(options.source_ip, cli.DEFAULTS['source_ip'])
self.assertEqual(options.source_port, cli.DEFAULTS['source_port'])
self.assertEqual(options.stun_port, cli.DEFAULTS['stun_port'])
# TODO: Verify user arguments
if __name__ == '__main__':
unittest.main()
|
c4e0a132461dba798739b752a04fe3ff66af17ab | tests/high_level_curl_test.py | tests/high_level_curl_test.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vi:ts=4:et
# uses the high level interface
import curl
import unittest
from . import appmanager
setup_module, teardown_module = appmanager.setup(('app', 8380))
class RelativeUrlTest(unittest.TestCase):
def setUp(self):
self.curl = curl.Curl('http://localhost:8380/')
def tearDown(self):
self.curl.close()
def test_reuse(self):
result = self.curl.get('/success')
self.assertEqual('success', result)
result = self.curl.get('/success')
self.assertEqual('success', result)
| #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vi:ts=4:et
# uses the high level interface
import curl
import unittest
from . import appmanager
setup_module, teardown_module = appmanager.setup(('app', 8380))
class RelativeUrlTest(unittest.TestCase):
def setUp(self):
self.curl = curl.Curl('http://localhost:8380/')
def tearDown(self):
self.curl.close()
def test_reuse(self):
result = self.curl.get('/success')
self.assertEqual('success', result.decode())
result = self.curl.get('/success')
self.assertEqual('success', result.decode())
| Fix test suite on python 3 - high level curl object returns result as bytes | Fix test suite on python 3 - high level curl object returns result as bytes
| Python | lgpl-2.1 | pycurl/pycurl,pycurl/pycurl,pycurl/pycurl | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vi:ts=4:et
# uses the high level interface
import curl
import unittest
from . import appmanager
setup_module, teardown_module = appmanager.setup(('app', 8380))
class RelativeUrlTest(unittest.TestCase):
def setUp(self):
self.curl = curl.Curl('http://localhost:8380/')
def tearDown(self):
self.curl.close()
def test_reuse(self):
result = self.curl.get('/success')
self.assertEqual('success', result)
result = self.curl.get('/success')
self.assertEqual('success', result)
Fix test suite on python 3 - high level curl object returns result as bytes | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vi:ts=4:et
# uses the high level interface
import curl
import unittest
from . import appmanager
setup_module, teardown_module = appmanager.setup(('app', 8380))
class RelativeUrlTest(unittest.TestCase):
def setUp(self):
self.curl = curl.Curl('http://localhost:8380/')
def tearDown(self):
self.curl.close()
def test_reuse(self):
result = self.curl.get('/success')
self.assertEqual('success', result.decode())
result = self.curl.get('/success')
self.assertEqual('success', result.decode())
| <commit_before>#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vi:ts=4:et
# uses the high level interface
import curl
import unittest
from . import appmanager
setup_module, teardown_module = appmanager.setup(('app', 8380))
class RelativeUrlTest(unittest.TestCase):
def setUp(self):
self.curl = curl.Curl('http://localhost:8380/')
def tearDown(self):
self.curl.close()
def test_reuse(self):
result = self.curl.get('/success')
self.assertEqual('success', result)
result = self.curl.get('/success')
self.assertEqual('success', result)
<commit_msg>Fix test suite on python 3 - high level curl object returns result as bytes<commit_after> | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vi:ts=4:et
# uses the high level interface
import curl
import unittest
from . import appmanager
setup_module, teardown_module = appmanager.setup(('app', 8380))
class RelativeUrlTest(unittest.TestCase):
def setUp(self):
self.curl = curl.Curl('http://localhost:8380/')
def tearDown(self):
self.curl.close()
def test_reuse(self):
result = self.curl.get('/success')
self.assertEqual('success', result.decode())
result = self.curl.get('/success')
self.assertEqual('success', result.decode())
| #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vi:ts=4:et
# uses the high level interface
import curl
import unittest
from . import appmanager
setup_module, teardown_module = appmanager.setup(('app', 8380))
class RelativeUrlTest(unittest.TestCase):
def setUp(self):
self.curl = curl.Curl('http://localhost:8380/')
def tearDown(self):
self.curl.close()
def test_reuse(self):
result = self.curl.get('/success')
self.assertEqual('success', result)
result = self.curl.get('/success')
self.assertEqual('success', result)
Fix test suite on python 3 - high level curl object returns result as bytes#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vi:ts=4:et
# uses the high level interface
import curl
import unittest
from . import appmanager
setup_module, teardown_module = appmanager.setup(('app', 8380))
class RelativeUrlTest(unittest.TestCase):
def setUp(self):
self.curl = curl.Curl('http://localhost:8380/')
def tearDown(self):
self.curl.close()
def test_reuse(self):
result = self.curl.get('/success')
self.assertEqual('success', result.decode())
result = self.curl.get('/success')
self.assertEqual('success', result.decode())
| <commit_before>#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vi:ts=4:et
# uses the high level interface
import curl
import unittest
from . import appmanager
setup_module, teardown_module = appmanager.setup(('app', 8380))
class RelativeUrlTest(unittest.TestCase):
def setUp(self):
self.curl = curl.Curl('http://localhost:8380/')
def tearDown(self):
self.curl.close()
def test_reuse(self):
result = self.curl.get('/success')
self.assertEqual('success', result)
result = self.curl.get('/success')
self.assertEqual('success', result)
<commit_msg>Fix test suite on python 3 - high level curl object returns result as bytes<commit_after>#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vi:ts=4:et
# uses the high level interface
import curl
import unittest
from . import appmanager
setup_module, teardown_module = appmanager.setup(('app', 8380))
class RelativeUrlTest(unittest.TestCase):
def setUp(self):
self.curl = curl.Curl('http://localhost:8380/')
def tearDown(self):
self.curl.close()
def test_reuse(self):
result = self.curl.get('/success')
self.assertEqual('success', result.decode())
result = self.curl.get('/success')
self.assertEqual('success', result.decode())
|
17d91eff7de5517aa89330a08f3c84fa46d02538 | tests/test_exc.py | tests/test_exc.py | # -*- coding: utf-8 -*-
import pytest
from cihai import exc
def test_base_exception():
with pytest.raises(
exc.CihaiException,
message="Make sure no one removes or renames base CihaiException",
):
raise exc.CihaiException()
with pytest.raises(Exception, message="Extends python base exception"):
raise exc.CihaiException()
| # -*- coding: utf-8 -*-
import pytest
from cihai import exc
def test_base_exception():
with pytest.raises(exc.CihaiException):
raise exc.CihaiException() # Make sure its base of CihaiException
with pytest.raises(Exception):
raise exc.CihaiException() # Extends python base exception
| Update exception test for pytest 5+ | Update exception test for pytest 5+
pytest 3 had message for raises, this is removed in current versions.
| Python | mit | cihai/cihai,cihai/cihai | # -*- coding: utf-8 -*-
import pytest
from cihai import exc
def test_base_exception():
with pytest.raises(
exc.CihaiException,
message="Make sure no one removes or renames base CihaiException",
):
raise exc.CihaiException()
with pytest.raises(Exception, message="Extends python base exception"):
raise exc.CihaiException()
Update exception test for pytest 5+
pytest 3 had message for raises, this is removed in current versions. | # -*- coding: utf-8 -*-
import pytest
from cihai import exc
def test_base_exception():
with pytest.raises(exc.CihaiException):
raise exc.CihaiException() # Make sure its base of CihaiException
with pytest.raises(Exception):
raise exc.CihaiException() # Extends python base exception
| <commit_before># -*- coding: utf-8 -*-
import pytest
from cihai import exc
def test_base_exception():
with pytest.raises(
exc.CihaiException,
message="Make sure no one removes or renames base CihaiException",
):
raise exc.CihaiException()
with pytest.raises(Exception, message="Extends python base exception"):
raise exc.CihaiException()
<commit_msg>Update exception test for pytest 5+
pytest 3 had message for raises, this is removed in current versions.<commit_after> | # -*- coding: utf-8 -*-
import pytest
from cihai import exc
def test_base_exception():
with pytest.raises(exc.CihaiException):
raise exc.CihaiException() # Make sure its base of CihaiException
with pytest.raises(Exception):
raise exc.CihaiException() # Extends python base exception
| # -*- coding: utf-8 -*-
import pytest
from cihai import exc
def test_base_exception():
with pytest.raises(
exc.CihaiException,
message="Make sure no one removes or renames base CihaiException",
):
raise exc.CihaiException()
with pytest.raises(Exception, message="Extends python base exception"):
raise exc.CihaiException()
Update exception test for pytest 5+
pytest 3 had message for raises, this is removed in current versions.# -*- coding: utf-8 -*-
import pytest
from cihai import exc
def test_base_exception():
with pytest.raises(exc.CihaiException):
raise exc.CihaiException() # Make sure its base of CihaiException
with pytest.raises(Exception):
raise exc.CihaiException() # Extends python base exception
| <commit_before># -*- coding: utf-8 -*-
import pytest
from cihai import exc
def test_base_exception():
with pytest.raises(
exc.CihaiException,
message="Make sure no one removes or renames base CihaiException",
):
raise exc.CihaiException()
with pytest.raises(Exception, message="Extends python base exception"):
raise exc.CihaiException()
<commit_msg>Update exception test for pytest 5+
pytest 3 had message for raises, this is removed in current versions.<commit_after># -*- coding: utf-8 -*-
import pytest
from cihai import exc
def test_base_exception():
with pytest.raises(exc.CihaiException):
raise exc.CihaiException() # Make sure its base of CihaiException
with pytest.raises(Exception):
raise exc.CihaiException() # Extends python base exception
|
1d0e75959f4511cbca10cb223b01c3a29d3660ec | tmhmm/__init__.py | tmhmm/__init__.py | from collections import Counter, defaultdict
import numpy as np
from tmhmm.model import parse
from tmhmm.hmm import viterbi, forward, backward
__all__ = ['predict']
GROUP_NAMES = ('i', 'm', 'o')
def predict(sequence, header, model_or_filelike, compute_posterior=True):
if isinstance(model_or_filelike, tuple):
model = model_or_filelike
else:
_, model = parse(open(model_or_filelike))
_, path = viterbi(sequence, *model)
if compute_posterior:
forward_table, constants = forward(sequence, *model)
backward_table = backward(sequence, constants, *model)
posterior = forward_table * backward_table
_, _, _, char_map, label_map, name_map = model
observations = len(sequence)
states = len(name_map)
# just counts how many states there are per label
group_counts = Counter(label_map.values())
table = np.zeros(shape=(observations, 3))
for i in range(observations):
group_probs = defaultdict(float)
for j in range(states):
group = label_map[j].lower()
group_probs[group] += posterior[i, j]
for k, group in enumerate(GROUP_NAMES):
table[i, k] = group_probs[group]
return path, table/table.sum(axis=1, keepdims=True)
return path
| from collections import defaultdict
import numpy as np
from tmhmm.model import parse
from tmhmm.hmm import viterbi, forward, backward
__all__ = ['predict']
GROUP_NAMES = ('i', 'm', 'o')
def predict(sequence, header, model_or_filelike, compute_posterior=True):
if isinstance(model_or_filelike, tuple):
model = model_or_filelike
else:
_, model = parse(open(model_or_filelike))
_, path = viterbi(sequence, *model)
if compute_posterior:
forward_table, constants = forward(sequence, *model)
backward_table = backward(sequence, constants, *model)
posterior = forward_table * backward_table
_, _, _, char_map, label_map, name_map = model
observations = len(sequence)
states = len(name_map)
table = np.zeros(shape=(observations, 3))
for i in range(observations):
group_probs = defaultdict(float)
for j in range(states):
group = label_map[j].lower()
group_probs[group] += posterior[i, j]
for k, group in enumerate(GROUP_NAMES):
table[i, k] = group_probs[group]
return path, table/table.sum(axis=1, keepdims=True)
return path
| Remove unused group counts variable | Remove unused group counts variable
| Python | mit | dansondergaard/tmhmm.py | from collections import Counter, defaultdict
import numpy as np
from tmhmm.model import parse
from tmhmm.hmm import viterbi, forward, backward
__all__ = ['predict']
GROUP_NAMES = ('i', 'm', 'o')
def predict(sequence, header, model_or_filelike, compute_posterior=True):
if isinstance(model_or_filelike, tuple):
model = model_or_filelike
else:
_, model = parse(open(model_or_filelike))
_, path = viterbi(sequence, *model)
if compute_posterior:
forward_table, constants = forward(sequence, *model)
backward_table = backward(sequence, constants, *model)
posterior = forward_table * backward_table
_, _, _, char_map, label_map, name_map = model
observations = len(sequence)
states = len(name_map)
# just counts how many states there are per label
group_counts = Counter(label_map.values())
table = np.zeros(shape=(observations, 3))
for i in range(observations):
group_probs = defaultdict(float)
for j in range(states):
group = label_map[j].lower()
group_probs[group] += posterior[i, j]
for k, group in enumerate(GROUP_NAMES):
table[i, k] = group_probs[group]
return path, table/table.sum(axis=1, keepdims=True)
return path
Remove unused group counts variable | from collections import defaultdict
import numpy as np
from tmhmm.model import parse
from tmhmm.hmm import viterbi, forward, backward
__all__ = ['predict']
GROUP_NAMES = ('i', 'm', 'o')
def predict(sequence, header, model_or_filelike, compute_posterior=True):
if isinstance(model_or_filelike, tuple):
model = model_or_filelike
else:
_, model = parse(open(model_or_filelike))
_, path = viterbi(sequence, *model)
if compute_posterior:
forward_table, constants = forward(sequence, *model)
backward_table = backward(sequence, constants, *model)
posterior = forward_table * backward_table
_, _, _, char_map, label_map, name_map = model
observations = len(sequence)
states = len(name_map)
table = np.zeros(shape=(observations, 3))
for i in range(observations):
group_probs = defaultdict(float)
for j in range(states):
group = label_map[j].lower()
group_probs[group] += posterior[i, j]
for k, group in enumerate(GROUP_NAMES):
table[i, k] = group_probs[group]
return path, table/table.sum(axis=1, keepdims=True)
return path
| <commit_before>from collections import Counter, defaultdict
import numpy as np
from tmhmm.model import parse
from tmhmm.hmm import viterbi, forward, backward
__all__ = ['predict']
GROUP_NAMES = ('i', 'm', 'o')
def predict(sequence, header, model_or_filelike, compute_posterior=True):
if isinstance(model_or_filelike, tuple):
model = model_or_filelike
else:
_, model = parse(open(model_or_filelike))
_, path = viterbi(sequence, *model)
if compute_posterior:
forward_table, constants = forward(sequence, *model)
backward_table = backward(sequence, constants, *model)
posterior = forward_table * backward_table
_, _, _, char_map, label_map, name_map = model
observations = len(sequence)
states = len(name_map)
# just counts how many states there are per label
group_counts = Counter(label_map.values())
table = np.zeros(shape=(observations, 3))
for i in range(observations):
group_probs = defaultdict(float)
for j in range(states):
group = label_map[j].lower()
group_probs[group] += posterior[i, j]
for k, group in enumerate(GROUP_NAMES):
table[i, k] = group_probs[group]
return path, table/table.sum(axis=1, keepdims=True)
return path
<commit_msg>Remove unused group counts variable<commit_after> | from collections import defaultdict
import numpy as np
from tmhmm.model import parse
from tmhmm.hmm import viterbi, forward, backward
__all__ = ['predict']
GROUP_NAMES = ('i', 'm', 'o')
def predict(sequence, header, model_or_filelike, compute_posterior=True):
if isinstance(model_or_filelike, tuple):
model = model_or_filelike
else:
_, model = parse(open(model_or_filelike))
_, path = viterbi(sequence, *model)
if compute_posterior:
forward_table, constants = forward(sequence, *model)
backward_table = backward(sequence, constants, *model)
posterior = forward_table * backward_table
_, _, _, char_map, label_map, name_map = model
observations = len(sequence)
states = len(name_map)
table = np.zeros(shape=(observations, 3))
for i in range(observations):
group_probs = defaultdict(float)
for j in range(states):
group = label_map[j].lower()
group_probs[group] += posterior[i, j]
for k, group in enumerate(GROUP_NAMES):
table[i, k] = group_probs[group]
return path, table/table.sum(axis=1, keepdims=True)
return path
| from collections import Counter, defaultdict
import numpy as np
from tmhmm.model import parse
from tmhmm.hmm import viterbi, forward, backward
__all__ = ['predict']
GROUP_NAMES = ('i', 'm', 'o')
def predict(sequence, header, model_or_filelike, compute_posterior=True):
if isinstance(model_or_filelike, tuple):
model = model_or_filelike
else:
_, model = parse(open(model_or_filelike))
_, path = viterbi(sequence, *model)
if compute_posterior:
forward_table, constants = forward(sequence, *model)
backward_table = backward(sequence, constants, *model)
posterior = forward_table * backward_table
_, _, _, char_map, label_map, name_map = model
observations = len(sequence)
states = len(name_map)
# just counts how many states there are per label
group_counts = Counter(label_map.values())
table = np.zeros(shape=(observations, 3))
for i in range(observations):
group_probs = defaultdict(float)
for j in range(states):
group = label_map[j].lower()
group_probs[group] += posterior[i, j]
for k, group in enumerate(GROUP_NAMES):
table[i, k] = group_probs[group]
return path, table/table.sum(axis=1, keepdims=True)
return path
Remove unused group counts variablefrom collections import defaultdict
import numpy as np
from tmhmm.model import parse
from tmhmm.hmm import viterbi, forward, backward
__all__ = ['predict']
GROUP_NAMES = ('i', 'm', 'o')
def predict(sequence, header, model_or_filelike, compute_posterior=True):
if isinstance(model_or_filelike, tuple):
model = model_or_filelike
else:
_, model = parse(open(model_or_filelike))
_, path = viterbi(sequence, *model)
if compute_posterior:
forward_table, constants = forward(sequence, *model)
backward_table = backward(sequence, constants, *model)
posterior = forward_table * backward_table
_, _, _, char_map, label_map, name_map = model
observations = len(sequence)
states = len(name_map)
table = np.zeros(shape=(observations, 3))
for i in range(observations):
group_probs = defaultdict(float)
for j in range(states):
group = label_map[j].lower()
group_probs[group] += posterior[i, j]
for k, group in enumerate(GROUP_NAMES):
table[i, k] = group_probs[group]
return path, table/table.sum(axis=1, keepdims=True)
return path
| <commit_before>from collections import Counter, defaultdict
import numpy as np
from tmhmm.model import parse
from tmhmm.hmm import viterbi, forward, backward
__all__ = ['predict']
GROUP_NAMES = ('i', 'm', 'o')
def predict(sequence, header, model_or_filelike, compute_posterior=True):
if isinstance(model_or_filelike, tuple):
model = model_or_filelike
else:
_, model = parse(open(model_or_filelike))
_, path = viterbi(sequence, *model)
if compute_posterior:
forward_table, constants = forward(sequence, *model)
backward_table = backward(sequence, constants, *model)
posterior = forward_table * backward_table
_, _, _, char_map, label_map, name_map = model
observations = len(sequence)
states = len(name_map)
# just counts how many states there are per label
group_counts = Counter(label_map.values())
table = np.zeros(shape=(observations, 3))
for i in range(observations):
group_probs = defaultdict(float)
for j in range(states):
group = label_map[j].lower()
group_probs[group] += posterior[i, j]
for k, group in enumerate(GROUP_NAMES):
table[i, k] = group_probs[group]
return path, table/table.sum(axis=1, keepdims=True)
return path
<commit_msg>Remove unused group counts variable<commit_after>from collections import defaultdict
import numpy as np
from tmhmm.model import parse
from tmhmm.hmm import viterbi, forward, backward
__all__ = ['predict']
GROUP_NAMES = ('i', 'm', 'o')
def predict(sequence, header, model_or_filelike, compute_posterior=True):
if isinstance(model_or_filelike, tuple):
model = model_or_filelike
else:
_, model = parse(open(model_or_filelike))
_, path = viterbi(sequence, *model)
if compute_posterior:
forward_table, constants = forward(sequence, *model)
backward_table = backward(sequence, constants, *model)
posterior = forward_table * backward_table
_, _, _, char_map, label_map, name_map = model
observations = len(sequence)
states = len(name_map)
table = np.zeros(shape=(observations, 3))
for i in range(observations):
group_probs = defaultdict(float)
for j in range(states):
group = label_map[j].lower()
group_probs[group] += posterior[i, j]
for k, group in enumerate(GROUP_NAMES):
table[i, k] = group_probs[group]
return path, table/table.sum(axis=1, keepdims=True)
return path
|
71e9b63f2c39e2b93bc570edc4214f4910b876f3 | ipkg/utils.py | ipkg/utils.py | import os
import json
import logging
LOGGER = logging.getLogger(__name__)
class DictFile(dict):
"""A ``dict``, storable as a JSON file.
"""
def __init__(self, file_path):
super(DictFile, self).__init__()
self.__file_path = file_path
self.reload()
def reload(self):
if os.path.isfile(self.__file_path):
LOGGER.debug('Loading %s', self.__file_path)
with open(self.__file_path) as f:
self.update(json.load(f))
def clear(self):
"""Force the dictionary to be empty.
"""
if os.path.isfile(self.__file_path):
os.unlink(self.__file_path)
super(DictFile, self).clear()
def save(self):
LOGGER.debug('Writing %s', self.__file_path)
with open(self.__file_path, 'w') as f:
json.dump(self, f, indent=4)
| import os
import json
import logging
from .vfiles import vopen
LOGGER = logging.getLogger(__name__)
class DictFile(dict):
"""A ``dict``, storable as a JSON file.
"""
def __init__(self, file_path):
super(DictFile, self).__init__()
self.__file_path = file_path
self.reload()
def reload(self):
if os.path.isfile(self.__file_path):
LOGGER.debug('Loading %s', self.__file_path)
self.update(json.load(vopen(self.__file_path)))
def clear(self):
"""Force the dictionary to be empty.
"""
if os.path.isfile(self.__file_path):
os.unlink(self.__file_path)
super(DictFile, self).clear()
def save(self):
LOGGER.debug('Writing %s', self.__file_path)
# This will break if trying to call save() on a remote DictFile
with open(self.__file_path, 'w') as f:
json.dump(self, f, indent=4)
| Use vopen() to make DictFile works when reading remote repository meta data | Use vopen() to make DictFile works when reading remote repository meta data
| Python | mit | pmuller/ipkg | import os
import json
import logging
LOGGER = logging.getLogger(__name__)
class DictFile(dict):
"""A ``dict``, storable as a JSON file.
"""
def __init__(self, file_path):
super(DictFile, self).__init__()
self.__file_path = file_path
self.reload()
def reload(self):
if os.path.isfile(self.__file_path):
LOGGER.debug('Loading %s', self.__file_path)
with open(self.__file_path) as f:
self.update(json.load(f))
def clear(self):
"""Force the dictionary to be empty.
"""
if os.path.isfile(self.__file_path):
os.unlink(self.__file_path)
super(DictFile, self).clear()
def save(self):
LOGGER.debug('Writing %s', self.__file_path)
with open(self.__file_path, 'w') as f:
json.dump(self, f, indent=4)
Use vopen() to make DictFile works when reading remote repository meta data | import os
import json
import logging
from .vfiles import vopen
LOGGER = logging.getLogger(__name__)
class DictFile(dict):
"""A ``dict``, storable as a JSON file.
"""
def __init__(self, file_path):
super(DictFile, self).__init__()
self.__file_path = file_path
self.reload()
def reload(self):
if os.path.isfile(self.__file_path):
LOGGER.debug('Loading %s', self.__file_path)
self.update(json.load(vopen(self.__file_path)))
def clear(self):
"""Force the dictionary to be empty.
"""
if os.path.isfile(self.__file_path):
os.unlink(self.__file_path)
super(DictFile, self).clear()
def save(self):
LOGGER.debug('Writing %s', self.__file_path)
# This will break if trying to call save() on a remote DictFile
with open(self.__file_path, 'w') as f:
json.dump(self, f, indent=4)
| <commit_before>import os
import json
import logging
LOGGER = logging.getLogger(__name__)
class DictFile(dict):
"""A ``dict``, storable as a JSON file.
"""
def __init__(self, file_path):
super(DictFile, self).__init__()
self.__file_path = file_path
self.reload()
def reload(self):
if os.path.isfile(self.__file_path):
LOGGER.debug('Loading %s', self.__file_path)
with open(self.__file_path) as f:
self.update(json.load(f))
def clear(self):
"""Force the dictionary to be empty.
"""
if os.path.isfile(self.__file_path):
os.unlink(self.__file_path)
super(DictFile, self).clear()
def save(self):
LOGGER.debug('Writing %s', self.__file_path)
with open(self.__file_path, 'w') as f:
json.dump(self, f, indent=4)
<commit_msg>Use vopen() to make DictFile works when reading remote repository meta data<commit_after> | import os
import json
import logging
from .vfiles import vopen
LOGGER = logging.getLogger(__name__)
class DictFile(dict):
"""A ``dict``, storable as a JSON file.
"""
def __init__(self, file_path):
super(DictFile, self).__init__()
self.__file_path = file_path
self.reload()
def reload(self):
if os.path.isfile(self.__file_path):
LOGGER.debug('Loading %s', self.__file_path)
self.update(json.load(vopen(self.__file_path)))
def clear(self):
"""Force the dictionary to be empty.
"""
if os.path.isfile(self.__file_path):
os.unlink(self.__file_path)
super(DictFile, self).clear()
def save(self):
LOGGER.debug('Writing %s', self.__file_path)
# This will break if trying to call save() on a remote DictFile
with open(self.__file_path, 'w') as f:
json.dump(self, f, indent=4)
| import os
import json
import logging
LOGGER = logging.getLogger(__name__)
class DictFile(dict):
"""A ``dict``, storable as a JSON file.
"""
def __init__(self, file_path):
super(DictFile, self).__init__()
self.__file_path = file_path
self.reload()
def reload(self):
if os.path.isfile(self.__file_path):
LOGGER.debug('Loading %s', self.__file_path)
with open(self.__file_path) as f:
self.update(json.load(f))
def clear(self):
"""Force the dictionary to be empty.
"""
if os.path.isfile(self.__file_path):
os.unlink(self.__file_path)
super(DictFile, self).clear()
def save(self):
LOGGER.debug('Writing %s', self.__file_path)
with open(self.__file_path, 'w') as f:
json.dump(self, f, indent=4)
Use vopen() to make DictFile works when reading remote repository meta dataimport os
import json
import logging
from .vfiles import vopen
LOGGER = logging.getLogger(__name__)
class DictFile(dict):
"""A ``dict``, storable as a JSON file.
"""
def __init__(self, file_path):
super(DictFile, self).__init__()
self.__file_path = file_path
self.reload()
def reload(self):
if os.path.isfile(self.__file_path):
LOGGER.debug('Loading %s', self.__file_path)
self.update(json.load(vopen(self.__file_path)))
def clear(self):
"""Force the dictionary to be empty.
"""
if os.path.isfile(self.__file_path):
os.unlink(self.__file_path)
super(DictFile, self).clear()
def save(self):
LOGGER.debug('Writing %s', self.__file_path)
# This will break if trying to call save() on a remote DictFile
with open(self.__file_path, 'w') as f:
json.dump(self, f, indent=4)
| <commit_before>import os
import json
import logging
LOGGER = logging.getLogger(__name__)
class DictFile(dict):
"""A ``dict``, storable as a JSON file.
"""
def __init__(self, file_path):
super(DictFile, self).__init__()
self.__file_path = file_path
self.reload()
def reload(self):
if os.path.isfile(self.__file_path):
LOGGER.debug('Loading %s', self.__file_path)
with open(self.__file_path) as f:
self.update(json.load(f))
def clear(self):
"""Force the dictionary to be empty.
"""
if os.path.isfile(self.__file_path):
os.unlink(self.__file_path)
super(DictFile, self).clear()
def save(self):
LOGGER.debug('Writing %s', self.__file_path)
with open(self.__file_path, 'w') as f:
json.dump(self, f, indent=4)
<commit_msg>Use vopen() to make DictFile works when reading remote repository meta data<commit_after>import os
import json
import logging
from .vfiles import vopen
LOGGER = logging.getLogger(__name__)
class DictFile(dict):
"""A ``dict``, storable as a JSON file.
"""
def __init__(self, file_path):
super(DictFile, self).__init__()
self.__file_path = file_path
self.reload()
def reload(self):
if os.path.isfile(self.__file_path):
LOGGER.debug('Loading %s', self.__file_path)
self.update(json.load(vopen(self.__file_path)))
def clear(self):
"""Force the dictionary to be empty.
"""
if os.path.isfile(self.__file_path):
os.unlink(self.__file_path)
super(DictFile, self).clear()
def save(self):
LOGGER.debug('Writing %s', self.__file_path)
# This will break if trying to call save() on a remote DictFile
with open(self.__file_path, 'w') as f:
json.dump(self, f, indent=4)
|
ff8f1067ac95a8f3fbb4c02e510da033623edeee | gargoyle/helpers.py | gargoyle/helpers.py | """
gargoyle.helpers
~~~~~~~~~~~~~~~~
:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from django.http import HttpRequest
class MockRequest(HttpRequest):
"""
A mock request object which stores a user
instance and the ip address.
"""
def __init__(self, user=None, ip_address=None):
from django.contrib.auth.models import AnonymousUser
self.user = user or AnonymousUser()
self.META = {
'REMOTE_ADDR': ip_address,
} | """
gargoyle.helpers
~~~~~~~~~~~~~~~~
:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from django.http import HttpRequest
class MockRequest(HttpRequest):
"""
A mock request object which stores a user
instance and the ip address.
"""
def __init__(self, user=None, ip_address=None):
from django.contrib.auth.models import AnonymousUser
self.user = user or AnonymousUser()
self.GET = {}
self.POST = {}
self.COOKIES = {}
self.META = {
'REMOTE_ADDR': ip_address,
} | Set POST/GET/COOKIES on MockRequest so repr works | Set POST/GET/COOKIES on MockRequest so repr works
| Python | apache-2.0 | disqus/gutter-django,nkovshov/gargoyle,nkovshov/gargoyle,nkovshov/gargoyle,frewsxcv/gargoyle,brilliant-org/gargoyle,frewsxcv/gargoyle,YPlan/gargoyle,roverdotcom/gargoyle,monokrome/gargoyle,monokrome/gargoyle,disqus/gutter,vikingco/gargoyle,disqus/gutter-django,blueprinthealth/gargoyle,YPlan/gargoyle,graingert/gutter-django,disqus/gargoyle,disqus/gutter-django,brilliant-org/gargoyle,Raekkeri/gargoyle,kalail/gutter,graingert/gutter-django,vikingco/gargoyle,kalail/gutter,Raekkeri/gargoyle,blueprinthealth/gargoyle,disqus/gargoyle,vikingco/gargoyle,frewsxcv/gargoyle,disqus/gutter,roverdotcom/gargoyle,zapier/gargoyle,blueprinthealth/gargoyle,graingert/gutter-django,disqus/gargoyle,monokrome/gargoyle,disqus/gutter-django,roverdotcom/gargoyle,YPlan/gargoyle,kalail/gutter,Raekkeri/gargoyle,zapier/gargoyle,brilliant-org/gargoyle | """
gargoyle.helpers
~~~~~~~~~~~~~~~~
:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from django.http import HttpRequest
class MockRequest(HttpRequest):
"""
A mock request object which stores a user
instance and the ip address.
"""
def __init__(self, user=None, ip_address=None):
from django.contrib.auth.models import AnonymousUser
self.user = user or AnonymousUser()
self.META = {
'REMOTE_ADDR': ip_address,
}Set POST/GET/COOKIES on MockRequest so repr works | """
gargoyle.helpers
~~~~~~~~~~~~~~~~
:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from django.http import HttpRequest
class MockRequest(HttpRequest):
"""
A mock request object which stores a user
instance and the ip address.
"""
def __init__(self, user=None, ip_address=None):
from django.contrib.auth.models import AnonymousUser
self.user = user or AnonymousUser()
self.GET = {}
self.POST = {}
self.COOKIES = {}
self.META = {
'REMOTE_ADDR': ip_address,
} | <commit_before>"""
gargoyle.helpers
~~~~~~~~~~~~~~~~
:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from django.http import HttpRequest
class MockRequest(HttpRequest):
"""
A mock request object which stores a user
instance and the ip address.
"""
def __init__(self, user=None, ip_address=None):
from django.contrib.auth.models import AnonymousUser
self.user = user or AnonymousUser()
self.META = {
'REMOTE_ADDR': ip_address,
}<commit_msg>Set POST/GET/COOKIES on MockRequest so repr works<commit_after> | """
gargoyle.helpers
~~~~~~~~~~~~~~~~
:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from django.http import HttpRequest
class MockRequest(HttpRequest):
"""
A mock request object which stores a user
instance and the ip address.
"""
def __init__(self, user=None, ip_address=None):
from django.contrib.auth.models import AnonymousUser
self.user = user or AnonymousUser()
self.GET = {}
self.POST = {}
self.COOKIES = {}
self.META = {
'REMOTE_ADDR': ip_address,
} | """
gargoyle.helpers
~~~~~~~~~~~~~~~~
:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from django.http import HttpRequest
class MockRequest(HttpRequest):
"""
A mock request object which stores a user
instance and the ip address.
"""
def __init__(self, user=None, ip_address=None):
from django.contrib.auth.models import AnonymousUser
self.user = user or AnonymousUser()
self.META = {
'REMOTE_ADDR': ip_address,
}Set POST/GET/COOKIES on MockRequest so repr works"""
gargoyle.helpers
~~~~~~~~~~~~~~~~
:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from django.http import HttpRequest
class MockRequest(HttpRequest):
"""
A mock request object which stores a user
instance and the ip address.
"""
def __init__(self, user=None, ip_address=None):
from django.contrib.auth.models import AnonymousUser
self.user = user or AnonymousUser()
self.GET = {}
self.POST = {}
self.COOKIES = {}
self.META = {
'REMOTE_ADDR': ip_address,
} | <commit_before>"""
gargoyle.helpers
~~~~~~~~~~~~~~~~
:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from django.http import HttpRequest
class MockRequest(HttpRequest):
"""
A mock request object which stores a user
instance and the ip address.
"""
def __init__(self, user=None, ip_address=None):
from django.contrib.auth.models import AnonymousUser
self.user = user or AnonymousUser()
self.META = {
'REMOTE_ADDR': ip_address,
}<commit_msg>Set POST/GET/COOKIES on MockRequest so repr works<commit_after>"""
gargoyle.helpers
~~~~~~~~~~~~~~~~
:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from django.http import HttpRequest
class MockRequest(HttpRequest):
"""
A mock request object which stores a user
instance and the ip address.
"""
def __init__(self, user=None, ip_address=None):
from django.contrib.auth.models import AnonymousUser
self.user = user or AnonymousUser()
self.GET = {}
self.POST = {}
self.COOKIES = {}
self.META = {
'REMOTE_ADDR': ip_address,
} |
347e3f9092bf1f48e116cafceef8db255e293b1f | test/test_packages.py | test/test_packages.py | import pytest
@pytest.mark.parametrize("name", [
("apt-file"),
("apt-transport-https"),
("atsar"),
("blktrace"),
("ca-certificates"),
("chromium-browser"),
("cron"),
("curl"),
("diod"),
("docker-engine"),
("git"),
("gnupg"),
("handbrake"),
("handbrake-cli"),
("haveged"),
("htop"),
("i3"),
("iotop"),
("language-pack-en-base"),
("laptop-mode-tools"),
("nfs-common"),
("ntop"),
("ntp"),
("openssh-client"),
("openssh-server"),
("openssh-sftp-server"),
("openssl"),
("python"),
("python-pip"),
("sysstat"),
("tree"),
("vagrant"),
("vim"),
("vim-addon-manager"),
("vim-puppet"),
("vim-syntax-docker"),
("virtualbox"),
("vlc"),
("wget"),
("whois"),
("x264"),
("xinit"),
])
def test_packages(Package, name):
assert Package(name).is_installed
| import pytest
@pytest.mark.parametrize("name", [
("apt-file"),
("apt-transport-https"),
("atsar"),
("blktrace"),
("ca-certificates"),
("chromium-browser"),
("cron"),
("curl"),
("diod"),
("docker-engine"),
("fonts-font-awesome"),
("git"),
("gnupg"),
("handbrake"),
("handbrake-cli"),
("haveged"),
("htop"),
("i3"),
("iotop"),
("language-pack-en-base"),
("laptop-mode-tools"),
("nfs-common"),
("ntop"),
("ntp"),
("openssh-client"),
("openssh-server"),
("openssh-sftp-server"),
("openssl"),
("python"),
("python-pip"),
("suckless-tools"),
("sysstat"),
("tree"),
("vagrant"),
("vim"),
("vim-addon-manager"),
("vim-puppet"),
("vim-syntax-docker"),
("virtualbox"),
("vlc"),
("wget"),
("whois"),
("x264"),
("xfce4-terminal"),
("xfonts-terminus"),
("xinit"),
])
def test_packages(Package, name):
assert Package(name).is_installed
| Add new packages to tests | Add new packages to tests
| Python | mit | wicksy/laptop-build,wicksy/laptop-build,wicksy/laptop-build,wicksy/laptop-build | import pytest
@pytest.mark.parametrize("name", [
("apt-file"),
("apt-transport-https"),
("atsar"),
("blktrace"),
("ca-certificates"),
("chromium-browser"),
("cron"),
("curl"),
("diod"),
("docker-engine"),
("git"),
("gnupg"),
("handbrake"),
("handbrake-cli"),
("haveged"),
("htop"),
("i3"),
("iotop"),
("language-pack-en-base"),
("laptop-mode-tools"),
("nfs-common"),
("ntop"),
("ntp"),
("openssh-client"),
("openssh-server"),
("openssh-sftp-server"),
("openssl"),
("python"),
("python-pip"),
("sysstat"),
("tree"),
("vagrant"),
("vim"),
("vim-addon-manager"),
("vim-puppet"),
("vim-syntax-docker"),
("virtualbox"),
("vlc"),
("wget"),
("whois"),
("x264"),
("xinit"),
])
def test_packages(Package, name):
assert Package(name).is_installed
Add new packages to tests | import pytest
@pytest.mark.parametrize("name", [
("apt-file"),
("apt-transport-https"),
("atsar"),
("blktrace"),
("ca-certificates"),
("chromium-browser"),
("cron"),
("curl"),
("diod"),
("docker-engine"),
("fonts-font-awesome"),
("git"),
("gnupg"),
("handbrake"),
("handbrake-cli"),
("haveged"),
("htop"),
("i3"),
("iotop"),
("language-pack-en-base"),
("laptop-mode-tools"),
("nfs-common"),
("ntop"),
("ntp"),
("openssh-client"),
("openssh-server"),
("openssh-sftp-server"),
("openssl"),
("python"),
("python-pip"),
("suckless-tools"),
("sysstat"),
("tree"),
("vagrant"),
("vim"),
("vim-addon-manager"),
("vim-puppet"),
("vim-syntax-docker"),
("virtualbox"),
("vlc"),
("wget"),
("whois"),
("x264"),
("xfce4-terminal"),
("xfonts-terminus"),
("xinit"),
])
def test_packages(Package, name):
assert Package(name).is_installed
| <commit_before>import pytest
@pytest.mark.parametrize("name", [
("apt-file"),
("apt-transport-https"),
("atsar"),
("blktrace"),
("ca-certificates"),
("chromium-browser"),
("cron"),
("curl"),
("diod"),
("docker-engine"),
("git"),
("gnupg"),
("handbrake"),
("handbrake-cli"),
("haveged"),
("htop"),
("i3"),
("iotop"),
("language-pack-en-base"),
("laptop-mode-tools"),
("nfs-common"),
("ntop"),
("ntp"),
("openssh-client"),
("openssh-server"),
("openssh-sftp-server"),
("openssl"),
("python"),
("python-pip"),
("sysstat"),
("tree"),
("vagrant"),
("vim"),
("vim-addon-manager"),
("vim-puppet"),
("vim-syntax-docker"),
("virtualbox"),
("vlc"),
("wget"),
("whois"),
("x264"),
("xinit"),
])
def test_packages(Package, name):
assert Package(name).is_installed
<commit_msg>Add new packages to tests<commit_after> | import pytest
@pytest.mark.parametrize("name", [
("apt-file"),
("apt-transport-https"),
("atsar"),
("blktrace"),
("ca-certificates"),
("chromium-browser"),
("cron"),
("curl"),
("diod"),
("docker-engine"),
("fonts-font-awesome"),
("git"),
("gnupg"),
("handbrake"),
("handbrake-cli"),
("haveged"),
("htop"),
("i3"),
("iotop"),
("language-pack-en-base"),
("laptop-mode-tools"),
("nfs-common"),
("ntop"),
("ntp"),
("openssh-client"),
("openssh-server"),
("openssh-sftp-server"),
("openssl"),
("python"),
("python-pip"),
("suckless-tools"),
("sysstat"),
("tree"),
("vagrant"),
("vim"),
("vim-addon-manager"),
("vim-puppet"),
("vim-syntax-docker"),
("virtualbox"),
("vlc"),
("wget"),
("whois"),
("x264"),
("xfce4-terminal"),
("xfonts-terminus"),
("xinit"),
])
def test_packages(Package, name):
assert Package(name).is_installed
| import pytest
@pytest.mark.parametrize("name", [
("apt-file"),
("apt-transport-https"),
("atsar"),
("blktrace"),
("ca-certificates"),
("chromium-browser"),
("cron"),
("curl"),
("diod"),
("docker-engine"),
("git"),
("gnupg"),
("handbrake"),
("handbrake-cli"),
("haveged"),
("htop"),
("i3"),
("iotop"),
("language-pack-en-base"),
("laptop-mode-tools"),
("nfs-common"),
("ntop"),
("ntp"),
("openssh-client"),
("openssh-server"),
("openssh-sftp-server"),
("openssl"),
("python"),
("python-pip"),
("sysstat"),
("tree"),
("vagrant"),
("vim"),
("vim-addon-manager"),
("vim-puppet"),
("vim-syntax-docker"),
("virtualbox"),
("vlc"),
("wget"),
("whois"),
("x264"),
("xinit"),
])
def test_packages(Package, name):
assert Package(name).is_installed
Add new packages to testsimport pytest
@pytest.mark.parametrize("name", [
("apt-file"),
("apt-transport-https"),
("atsar"),
("blktrace"),
("ca-certificates"),
("chromium-browser"),
("cron"),
("curl"),
("diod"),
("docker-engine"),
("fonts-font-awesome"),
("git"),
("gnupg"),
("handbrake"),
("handbrake-cli"),
("haveged"),
("htop"),
("i3"),
("iotop"),
("language-pack-en-base"),
("laptop-mode-tools"),
("nfs-common"),
("ntop"),
("ntp"),
("openssh-client"),
("openssh-server"),
("openssh-sftp-server"),
("openssl"),
("python"),
("python-pip"),
("suckless-tools"),
("sysstat"),
("tree"),
("vagrant"),
("vim"),
("vim-addon-manager"),
("vim-puppet"),
("vim-syntax-docker"),
("virtualbox"),
("vlc"),
("wget"),
("whois"),
("x264"),
("xfce4-terminal"),
("xfonts-terminus"),
("xinit"),
])
def test_packages(Package, name):
assert Package(name).is_installed
| <commit_before>import pytest
@pytest.mark.parametrize("name", [
("apt-file"),
("apt-transport-https"),
("atsar"),
("blktrace"),
("ca-certificates"),
("chromium-browser"),
("cron"),
("curl"),
("diod"),
("docker-engine"),
("git"),
("gnupg"),
("handbrake"),
("handbrake-cli"),
("haveged"),
("htop"),
("i3"),
("iotop"),
("language-pack-en-base"),
("laptop-mode-tools"),
("nfs-common"),
("ntop"),
("ntp"),
("openssh-client"),
("openssh-server"),
("openssh-sftp-server"),
("openssl"),
("python"),
("python-pip"),
("sysstat"),
("tree"),
("vagrant"),
("vim"),
("vim-addon-manager"),
("vim-puppet"),
("vim-syntax-docker"),
("virtualbox"),
("vlc"),
("wget"),
("whois"),
("x264"),
("xinit"),
])
def test_packages(Package, name):
assert Package(name).is_installed
<commit_msg>Add new packages to tests<commit_after>import pytest
@pytest.mark.parametrize("name", [
("apt-file"),
("apt-transport-https"),
("atsar"),
("blktrace"),
("ca-certificates"),
("chromium-browser"),
("cron"),
("curl"),
("diod"),
("docker-engine"),
("fonts-font-awesome"),
("git"),
("gnupg"),
("handbrake"),
("handbrake-cli"),
("haveged"),
("htop"),
("i3"),
("iotop"),
("language-pack-en-base"),
("laptop-mode-tools"),
("nfs-common"),
("ntop"),
("ntp"),
("openssh-client"),
("openssh-server"),
("openssh-sftp-server"),
("openssl"),
("python"),
("python-pip"),
("suckless-tools"),
("sysstat"),
("tree"),
("vagrant"),
("vim"),
("vim-addon-manager"),
("vim-puppet"),
("vim-syntax-docker"),
("virtualbox"),
("vlc"),
("wget"),
("whois"),
("x264"),
("xfce4-terminal"),
("xfonts-terminus"),
("xinit"),
])
def test_packages(Package, name):
assert Package(name).is_installed
|
7c1a6fdc82ccdf8469d95e1e77897fab6e25d551 | hammock/__init__.py | hammock/__init__.py | import types
from version import __version__
from .model import Model
from .collection import Collection
class Hammock(object):
def __init__(self, collections=(), authenticators=(), storage=None):
if type(collections) == types.ModuleType:
collection_classes = []
for k,v in collections.__dict__.items():
try:
if issubclass(v, Collection):
collection_classes.append(v)
except TypeError:
pass
else:
collection_classes = collections
entities = set()
self.collections_by_class_name = {}
for collection_cls in collection_classes:
entities.add(collection_cls.entity)
collection = collection_cls(storage)
self.collections_by_class_name[collection_cls.__name__] = collection
setattr(self, collection_cls.plural_name, collection)
self.model = Model(storage, entities)
for collection in self.collections_by_class_name.values():
new_links = {}
if collection.links:
for k, v in collection.links.items():
if not isinstance(v, basestring):
v = v.__name__
referenced_collection = self.collections_by_class_name.get(v)
new_links[k] = referenced_collection
collection.links = new_links | import types
from version import __version__
from .model import Model
from .collection import Collection
class Hammock(object):
def __init__(self, collections=(), authenticators=(), storage=None):
if type(collections) == types.ModuleType:
collection_classes = []
for k,v in collections.__dict__.items():
try:
if issubclass(v, Collection) and v != Collection:
collection_classes.append(v)
except TypeError:
pass
else:
collection_classes = collections
entities = set()
self.collections_by_class_name = {}
for collection_cls in collection_classes:
entities.add(collection_cls.entity)
collection = collection_cls(storage)
self.collections_by_class_name[collection_cls.__name__] = collection
setattr(self, collection_cls.plural_name, collection)
self.model = Model(storage, entities)
for collection in self.collections_by_class_name.values():
new_links = {}
if collection.links:
for k, v in collection.links.items():
if not isinstance(v, basestring):
v = v.__name__
referenced_collection = self.collections_by_class_name.get(v)
new_links[k] = referenced_collection
collection.links = new_links | Make sure we don't include Collection when pulling collections from a module | Make sure we don't include Collection when pulling collections from a module
| Python | mit | cooper-software/cellardoor | import types
from version import __version__
from .model import Model
from .collection import Collection
class Hammock(object):
def __init__(self, collections=(), authenticators=(), storage=None):
if type(collections) == types.ModuleType:
collection_classes = []
for k,v in collections.__dict__.items():
try:
if issubclass(v, Collection):
collection_classes.append(v)
except TypeError:
pass
else:
collection_classes = collections
entities = set()
self.collections_by_class_name = {}
for collection_cls in collection_classes:
entities.add(collection_cls.entity)
collection = collection_cls(storage)
self.collections_by_class_name[collection_cls.__name__] = collection
setattr(self, collection_cls.plural_name, collection)
self.model = Model(storage, entities)
for collection in self.collections_by_class_name.values():
new_links = {}
if collection.links:
for k, v in collection.links.items():
if not isinstance(v, basestring):
v = v.__name__
referenced_collection = self.collections_by_class_name.get(v)
new_links[k] = referenced_collection
collection.links = new_linksMake sure we don't include Collection when pulling collections from a module | import types
from version import __version__
from .model import Model
from .collection import Collection
class Hammock(object):
def __init__(self, collections=(), authenticators=(), storage=None):
if type(collections) == types.ModuleType:
collection_classes = []
for k,v in collections.__dict__.items():
try:
if issubclass(v, Collection) and v != Collection:
collection_classes.append(v)
except TypeError:
pass
else:
collection_classes = collections
entities = set()
self.collections_by_class_name = {}
for collection_cls in collection_classes:
entities.add(collection_cls.entity)
collection = collection_cls(storage)
self.collections_by_class_name[collection_cls.__name__] = collection
setattr(self, collection_cls.plural_name, collection)
self.model = Model(storage, entities)
for collection in self.collections_by_class_name.values():
new_links = {}
if collection.links:
for k, v in collection.links.items():
if not isinstance(v, basestring):
v = v.__name__
referenced_collection = self.collections_by_class_name.get(v)
new_links[k] = referenced_collection
collection.links = new_links | <commit_before>import types
from version import __version__
from .model import Model
from .collection import Collection
class Hammock(object):
def __init__(self, collections=(), authenticators=(), storage=None):
if type(collections) == types.ModuleType:
collection_classes = []
for k,v in collections.__dict__.items():
try:
if issubclass(v, Collection):
collection_classes.append(v)
except TypeError:
pass
else:
collection_classes = collections
entities = set()
self.collections_by_class_name = {}
for collection_cls in collection_classes:
entities.add(collection_cls.entity)
collection = collection_cls(storage)
self.collections_by_class_name[collection_cls.__name__] = collection
setattr(self, collection_cls.plural_name, collection)
self.model = Model(storage, entities)
for collection in self.collections_by_class_name.values():
new_links = {}
if collection.links:
for k, v in collection.links.items():
if not isinstance(v, basestring):
v = v.__name__
referenced_collection = self.collections_by_class_name.get(v)
new_links[k] = referenced_collection
collection.links = new_links<commit_msg>Make sure we don't include Collection when pulling collections from a module<commit_after> | import types
from version import __version__
from .model import Model
from .collection import Collection
class Hammock(object):
def __init__(self, collections=(), authenticators=(), storage=None):
if type(collections) == types.ModuleType:
collection_classes = []
for k,v in collections.__dict__.items():
try:
if issubclass(v, Collection) and v != Collection:
collection_classes.append(v)
except TypeError:
pass
else:
collection_classes = collections
entities = set()
self.collections_by_class_name = {}
for collection_cls in collection_classes:
entities.add(collection_cls.entity)
collection = collection_cls(storage)
self.collections_by_class_name[collection_cls.__name__] = collection
setattr(self, collection_cls.plural_name, collection)
self.model = Model(storage, entities)
for collection in self.collections_by_class_name.values():
new_links = {}
if collection.links:
for k, v in collection.links.items():
if not isinstance(v, basestring):
v = v.__name__
referenced_collection = self.collections_by_class_name.get(v)
new_links[k] = referenced_collection
collection.links = new_links | import types
from version import __version__
from .model import Model
from .collection import Collection
class Hammock(object):
def __init__(self, collections=(), authenticators=(), storage=None):
if type(collections) == types.ModuleType:
collection_classes = []
for k,v in collections.__dict__.items():
try:
if issubclass(v, Collection):
collection_classes.append(v)
except TypeError:
pass
else:
collection_classes = collections
entities = set()
self.collections_by_class_name = {}
for collection_cls in collection_classes:
entities.add(collection_cls.entity)
collection = collection_cls(storage)
self.collections_by_class_name[collection_cls.__name__] = collection
setattr(self, collection_cls.plural_name, collection)
self.model = Model(storage, entities)
for collection in self.collections_by_class_name.values():
new_links = {}
if collection.links:
for k, v in collection.links.items():
if not isinstance(v, basestring):
v = v.__name__
referenced_collection = self.collections_by_class_name.get(v)
new_links[k] = referenced_collection
collection.links = new_linksMake sure we don't include Collection when pulling collections from a moduleimport types
from version import __version__
from .model import Model
from .collection import Collection
class Hammock(object):
def __init__(self, collections=(), authenticators=(), storage=None):
if type(collections) == types.ModuleType:
collection_classes = []
for k,v in collections.__dict__.items():
try:
if issubclass(v, Collection) and v != Collection:
collection_classes.append(v)
except TypeError:
pass
else:
collection_classes = collections
entities = set()
self.collections_by_class_name = {}
for collection_cls in collection_classes:
entities.add(collection_cls.entity)
collection = collection_cls(storage)
self.collections_by_class_name[collection_cls.__name__] = collection
setattr(self, collection_cls.plural_name, collection)
self.model = Model(storage, entities)
for collection in self.collections_by_class_name.values():
new_links = {}
if collection.links:
for k, v in collection.links.items():
if not isinstance(v, basestring):
v = v.__name__
referenced_collection = self.collections_by_class_name.get(v)
new_links[k] = referenced_collection
collection.links = new_links | <commit_before>import types
from version import __version__
from .model import Model
from .collection import Collection
class Hammock(object):
def __init__(self, collections=(), authenticators=(), storage=None):
if type(collections) == types.ModuleType:
collection_classes = []
for k,v in collections.__dict__.items():
try:
if issubclass(v, Collection):
collection_classes.append(v)
except TypeError:
pass
else:
collection_classes = collections
entities = set()
self.collections_by_class_name = {}
for collection_cls in collection_classes:
entities.add(collection_cls.entity)
collection = collection_cls(storage)
self.collections_by_class_name[collection_cls.__name__] = collection
setattr(self, collection_cls.plural_name, collection)
self.model = Model(storage, entities)
for collection in self.collections_by_class_name.values():
new_links = {}
if collection.links:
for k, v in collection.links.items():
if not isinstance(v, basestring):
v = v.__name__
referenced_collection = self.collections_by_class_name.get(v)
new_links[k] = referenced_collection
collection.links = new_links<commit_msg>Make sure we don't include Collection when pulling collections from a module<commit_after>import types
from version import __version__
from .model import Model
from .collection import Collection
class Hammock(object):
def __init__(self, collections=(), authenticators=(), storage=None):
if type(collections) == types.ModuleType:
collection_classes = []
for k,v in collections.__dict__.items():
try:
if issubclass(v, Collection) and v != Collection:
collection_classes.append(v)
except TypeError:
pass
else:
collection_classes = collections
entities = set()
self.collections_by_class_name = {}
for collection_cls in collection_classes:
entities.add(collection_cls.entity)
collection = collection_cls(storage)
self.collections_by_class_name[collection_cls.__name__] = collection
setattr(self, collection_cls.plural_name, collection)
self.model = Model(storage, entities)
for collection in self.collections_by_class_name.values():
new_links = {}
if collection.links:
for k, v in collection.links.items():
if not isinstance(v, basestring):
v = v.__name__
referenced_collection = self.collections_by_class_name.get(v)
new_links[k] = referenced_collection
collection.links = new_links |
714611e3fdddc4f2cacb5be4753d22322cafd312 | rest.py | rest.py | import json
import httplib2
from config import SERVER
http = httplib2.Http()
# print json.loads(http.request("http://172.28.101.30:8080/api/v1/family/1/?format=json", 'GET')[1])
def json_result(url, method='GET'):
response = http.request('http://%s%s?format=json' % (SERVER, url), method)[1]
# print 'URL', url
# print 'RESP', json.loads(response)
if response:
return json.loads(http.request('http://%s%s?format=json' % (SERVER, url), method)[1])
else:
return {}
def poll():
family = json_result('/api/v1/family/1/')
members = []
for m in family[u'members']:
member = json_result(m)
rewards = filter(lambda r: not r[u'consumed'],
[json_result(r) for r in member[u'rewards']])
for r in rewards:
# json_result(r[u'resource_uri'] + 'consume/')
pass
# List of triplets (first name, username, list of rewards to consume)
members.append((member[u'first_name'], member[u'username'], rewards))
return members
# print poll()
| import json
import httplib2
from config import SERVER
http = httplib2.Http()
# print json.loads(http.request("http://172.28.101.30:8080/api/v1/family/1/?format=json", 'GET')[1])
def json_result(url, method='GET'):
response = http.request('http://%s%s?format=json' % (SERVER, url), method)[1]
# print 'URL', url
# print 'RESP', json.loads(response)
if response:
return json.loads(http.request('http://%s%s?format=json' % (SERVER, url), method)[1])
else:
return {}
def poll():
family = json_result('/api/v1/family/1/')
members = []
for m in family[u'members']:
member = json_result(m)
rewards = filter(lambda r: not r[u'consumed'],
[json_result(r) for r in member[u'rewards']])
for r in rewards:
json_result(r[u'resource_uri'] + 'consume/')
pass
# List of triplets (first name, username, list of rewards to consume)
members.append((member[u'first_name'], member[u'username'], rewards))
return members
# print poll()
| Set it to consume rewards | Set it to consume rewards
| Python | mit | iuliux/FamilyQuest-RPi | import json
import httplib2
from config import SERVER
http = httplib2.Http()
# print json.loads(http.request("http://172.28.101.30:8080/api/v1/family/1/?format=json", 'GET')[1])
def json_result(url, method='GET'):
response = http.request('http://%s%s?format=json' % (SERVER, url), method)[1]
# print 'URL', url
# print 'RESP', json.loads(response)
if response:
return json.loads(http.request('http://%s%s?format=json' % (SERVER, url), method)[1])
else:
return {}
def poll():
family = json_result('/api/v1/family/1/')
members = []
for m in family[u'members']:
member = json_result(m)
rewards = filter(lambda r: not r[u'consumed'],
[json_result(r) for r in member[u'rewards']])
for r in rewards:
# json_result(r[u'resource_uri'] + 'consume/')
pass
# List of triplets (first name, username, list of rewards to consume)
members.append((member[u'first_name'], member[u'username'], rewards))
return members
# print poll()
Set it to consume rewards | import json
import httplib2
from config import SERVER
http = httplib2.Http()
# print json.loads(http.request("http://172.28.101.30:8080/api/v1/family/1/?format=json", 'GET')[1])
def json_result(url, method='GET'):
response = http.request('http://%s%s?format=json' % (SERVER, url), method)[1]
# print 'URL', url
# print 'RESP', json.loads(response)
if response:
return json.loads(http.request('http://%s%s?format=json' % (SERVER, url), method)[1])
else:
return {}
def poll():
family = json_result('/api/v1/family/1/')
members = []
for m in family[u'members']:
member = json_result(m)
rewards = filter(lambda r: not r[u'consumed'],
[json_result(r) for r in member[u'rewards']])
for r in rewards:
json_result(r[u'resource_uri'] + 'consume/')
pass
# List of triplets (first name, username, list of rewards to consume)
members.append((member[u'first_name'], member[u'username'], rewards))
return members
# print poll()
| <commit_before>import json
import httplib2
from config import SERVER
http = httplib2.Http()
# print json.loads(http.request("http://172.28.101.30:8080/api/v1/family/1/?format=json", 'GET')[1])
def json_result(url, method='GET'):
response = http.request('http://%s%s?format=json' % (SERVER, url), method)[1]
# print 'URL', url
# print 'RESP', json.loads(response)
if response:
return json.loads(http.request('http://%s%s?format=json' % (SERVER, url), method)[1])
else:
return {}
def poll():
family = json_result('/api/v1/family/1/')
members = []
for m in family[u'members']:
member = json_result(m)
rewards = filter(lambda r: not r[u'consumed'],
[json_result(r) for r in member[u'rewards']])
for r in rewards:
# json_result(r[u'resource_uri'] + 'consume/')
pass
# List of triplets (first name, username, list of rewards to consume)
members.append((member[u'first_name'], member[u'username'], rewards))
return members
# print poll()
<commit_msg>Set it to consume rewards<commit_after> | import json
import httplib2
from config import SERVER
http = httplib2.Http()
# print json.loads(http.request("http://172.28.101.30:8080/api/v1/family/1/?format=json", 'GET')[1])
def json_result(url, method='GET'):
response = http.request('http://%s%s?format=json' % (SERVER, url), method)[1]
# print 'URL', url
# print 'RESP', json.loads(response)
if response:
return json.loads(http.request('http://%s%s?format=json' % (SERVER, url), method)[1])
else:
return {}
def poll():
family = json_result('/api/v1/family/1/')
members = []
for m in family[u'members']:
member = json_result(m)
rewards = filter(lambda r: not r[u'consumed'],
[json_result(r) for r in member[u'rewards']])
for r in rewards:
json_result(r[u'resource_uri'] + 'consume/')
pass
# List of triplets (first name, username, list of rewards to consume)
members.append((member[u'first_name'], member[u'username'], rewards))
return members
# print poll()
| import json
import httplib2
from config import SERVER
http = httplib2.Http()
# print json.loads(http.request("http://172.28.101.30:8080/api/v1/family/1/?format=json", 'GET')[1])
def json_result(url, method='GET'):
response = http.request('http://%s%s?format=json' % (SERVER, url), method)[1]
# print 'URL', url
# print 'RESP', json.loads(response)
if response:
return json.loads(http.request('http://%s%s?format=json' % (SERVER, url), method)[1])
else:
return {}
def poll():
family = json_result('/api/v1/family/1/')
members = []
for m in family[u'members']:
member = json_result(m)
rewards = filter(lambda r: not r[u'consumed'],
[json_result(r) for r in member[u'rewards']])
for r in rewards:
# json_result(r[u'resource_uri'] + 'consume/')
pass
# List of triplets (first name, username, list of rewards to consume)
members.append((member[u'first_name'], member[u'username'], rewards))
return members
# print poll()
Set it to consume rewardsimport json
import httplib2
from config import SERVER
http = httplib2.Http()
# print json.loads(http.request("http://172.28.101.30:8080/api/v1/family/1/?format=json", 'GET')[1])
def json_result(url, method='GET'):
response = http.request('http://%s%s?format=json' % (SERVER, url), method)[1]
# print 'URL', url
# print 'RESP', json.loads(response)
if response:
return json.loads(http.request('http://%s%s?format=json' % (SERVER, url), method)[1])
else:
return {}
def poll():
family = json_result('/api/v1/family/1/')
members = []
for m in family[u'members']:
member = json_result(m)
rewards = filter(lambda r: not r[u'consumed'],
[json_result(r) for r in member[u'rewards']])
for r in rewards:
json_result(r[u'resource_uri'] + 'consume/')
pass
# List of triplets (first name, username, list of rewards to consume)
members.append((member[u'first_name'], member[u'username'], rewards))
return members
# print poll()
| <commit_before>import json
import httplib2
from config import SERVER
http = httplib2.Http()
# print json.loads(http.request("http://172.28.101.30:8080/api/v1/family/1/?format=json", 'GET')[1])
def json_result(url, method='GET'):
response = http.request('http://%s%s?format=json' % (SERVER, url), method)[1]
# print 'URL', url
# print 'RESP', json.loads(response)
if response:
return json.loads(http.request('http://%s%s?format=json' % (SERVER, url), method)[1])
else:
return {}
def poll():
family = json_result('/api/v1/family/1/')
members = []
for m in family[u'members']:
member = json_result(m)
rewards = filter(lambda r: not r[u'consumed'],
[json_result(r) for r in member[u'rewards']])
for r in rewards:
# json_result(r[u'resource_uri'] + 'consume/')
pass
# List of triplets (first name, username, list of rewards to consume)
members.append((member[u'first_name'], member[u'username'], rewards))
return members
# print poll()
<commit_msg>Set it to consume rewards<commit_after>import json
import httplib2
from config import SERVER
http = httplib2.Http()
# print json.loads(http.request("http://172.28.101.30:8080/api/v1/family/1/?format=json", 'GET')[1])
def json_result(url, method='GET'):
response = http.request('http://%s%s?format=json' % (SERVER, url), method)[1]
# print 'URL', url
# print 'RESP', json.loads(response)
if response:
return json.loads(http.request('http://%s%s?format=json' % (SERVER, url), method)[1])
else:
return {}
def poll():
family = json_result('/api/v1/family/1/')
members = []
for m in family[u'members']:
member = json_result(m)
rewards = filter(lambda r: not r[u'consumed'],
[json_result(r) for r in member[u'rewards']])
for r in rewards:
json_result(r[u'resource_uri'] + 'consume/')
pass
# List of triplets (first name, username, list of rewards to consume)
members.append((member[u'first_name'], member[u'username'], rewards))
return members
# print poll()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.